From 26007b884fc9bbc0da109487eb8b4e8da659244a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 03:20:28 +0000 Subject: [PATCH 01/54] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20keras=5Fsample?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow2/built-in/keras_sample/.keep diff --git a/TensorFlow2/built-in/keras_sample/.keep b/TensorFlow2/built-in/keras_sample/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 6faca651dd8e176f9c54d8a516798c7e19bc2122 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 03:21:27 +0000 Subject: [PATCH 02/54] =?UTF-8?q?subclassing=5Fconv=5Flayers=5FID2615=5Ffo?= =?UTF-8?q?r=5FTensorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 201 ++++++++++++ .../README.md | 203 ++++++++++++ .../README_BAK.md | 193 +++++++++++ .../modelzoo_level.txt | 3 + .../npu_convert_dropout.py | 54 +++ .../npu_ops.py | 256 +++++++++++++++ .../requirements.txt | 13 + .../run_1p.sh | 3 + .../subclassing_conv_layers.py | 310 ++++++++++++++++++ .../test/train_full_1p.sh | 167 ++++++++++ .../test/train_performance_1p.sh | 169 ++++++++++ .../test/train_performance_1p_static.sh | 169 ++++++++++ 12 files changed, 1741 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..2f83b0d67 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README.md @@ -0,0 +1,203 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +

<h2 id="基本信息.md">基本信息</h2>
+

**发布者(Publisher):Huawei**

**应用领域(Application Domain):Image Classification**

**版本(Version):1.1**

**修改时间(Modified):2022.04.08**

**大小(Size):458K**

**框架(Framework):TensorFlow_2.6.2**

**模型格式(Model Format):h5**

**精度(Precision):Mixed**

**处理器(Processor):昇腾910**

**应用级别(Categories):Research**

**描述(Description):基于TensorFlow2.X框架的图像分类训练代码**


<h2 id="概述.md">概述</h2>
+

## 简述

subclassing_conv_layers网络展示了如何通过Conv.convolution_op() API实现自定义卷积层:可以复用基础卷积层的大部分实现,只需通过该方法自定义卷积操作本身。使用该API实现的StandardizedConvolution非常简洁,仅包含四行代码。

  - 参考论文:

    https://arxiv.org/abs/1903.10520

  - 参考实现:
    https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/subclassing_conv_layers.py

  - 适配昇腾 AI 处理器的实现:
    skip

  - 通过Git获取对应commit_id的代码方法如下:
    ```
    git clone {repository_url}        # 克隆仓库的代码
    cd {repository_name}              # 切换到模型的代码仓目录
    git checkout {branch}             # 切换到对应分支
    git reset --hard {commit_id}      # 代码设置到对应的commit_id
    cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
    ```

## 默认配置

- 网络结构
  - 使用Conv.convolution_op()自定义卷积网络
  - 训练参数个数:34,826

- 训练超参(单卡):
  - Batch size: 256
  - num_classes:10
  - input_shape: [28,28,1]
  - Train epoch: 5

## 支持特性

| 特性列表  | 是否支持 |
|-------|------|
| 分布式训练 | 否    |
| 混合精度  | 是    |
| 数据并行  | 否    |

## 混合精度训练

昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。

## 开启混合精度

TF1.X风格(ConfigProto)的相关代码示例如下;本仓库TF2.X脚本的开启方式参见本节末尾的示例。

```
config_proto = tf.ConfigProto(allow_soft_placement=True)
custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = 'NpuOptimizer'
custom_op.parameter_map["use_off_line"].b = True
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
session_config = npu_config_proto(config_proto=config_proto)
```
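
本仓库训练脚本subclassing_conv_layers.py基于TF2.X,通过npu_device全局选项开启混合精度。最小示意如下(写法摘自本仓库脚本的npu_config()函数,precision_mode取值以脚本实际入参为准):

```python
import npu_device

# TF2.X下开启混合精度:先设置精度模式,再使能NPU device(与本仓库脚本一致)
npu_device.global_options().precision_mode = "allow_mix_precision"
npu_device.open().as_default()
```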

<h2 id="训练环境准备.md">训练环境准备</h2>
+

- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
- 运行以下命令安装依赖。
```
pip3 install -r requirements.txt
```
说明:依赖配置文件requirements.txt位于模型的根目录。

<h2 id="快速上手.md">快速上手</h2>
+

## 数据集准备

1. 模型训练使用mnist数据集,数据集请用户自行获取。
2. 数据集下载完毕后,请用户使用keras.datasets.mnist.load_data()直接读取数据(读取示例见本节末尾)。

## 模型训练
- 单击“立即下载”,并选择合适的下载方式下载源码包。
- 开始训练。

    1. 启动训练之前,首先要配置程序运行相关环境变量。

       环境变量配置信息参见:

          [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)

    2. 单卡训练

       2.1 设置单卡训练参数(脚本位于subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。

        ```
        #batch size
        batch_size=256
        #训练epoch数
        train_epochs=5
        #学习率
        learning_rate=0.001
        ```

       2.2 单卡训练指令(脚本位于subclassing_conv_layers_ID2615_for_TensorFlow2.X/test)

        ```
        于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
        bash train_full_1p.sh --data_path=xx
        数据集应为npz类型(数据切分可能不同),配置data_path时需指定为data这一层,例:--data_path=/home/data
        ├─data
          ├─mnist_npz
        ```
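
数据读取方式可参考本仓库训练脚本subclassing_conv_layers.py中的调用,示意如下(其中data_path仅为示意路径):

```python
import os
import tensorflow.keras as keras

# 与本仓库脚本一致:从data_path目录下读取mnist.npz
data_path = "/home/data"
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(
    os.path.join(data_path, "mnist.npz"))
```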

<h2 id="迁移学习指导.md">迁移学习指导</h2>
+

- 数据集准备。

  1. 获取数据。
     请参见“快速上手”中的数据集准备。

- 模型训练。

  请参考“快速上手”章节。

<h2 id="高级参考.md">高级参考</h2>
+ +## 脚本和示例代码 + + ├── README.md //说明文档 + ├── requirements.txt //依赖 + ├── modelzoo_level.txt //状态文件 + ├── subclassing_conv_layers.py //网络结构定义脚本 + ├── test + | |—— train_full_1p.sh //单卡训练脚本 + | |—— train_performance_1p.sh //单卡训练脚本 + +## 脚本参数 + +``` +batch_size 训练batch_size +learning_rate 初始学习率 +train_epochs 总训练epoch数 +precision_mode default="allow_mix_precision", type=str,help='the path to save over dump data' +over_dump type=ast.literal_eval,help='if or not over detection, default is False' +data_dump_flag type=ast.literal_eval,help='data dump flag, default is False' +data_dump_step data dump step, default is 10 +profiling type=ast.literal_eval help='if or not profiling for performance debug, default is False' +profiling_dump_path type=str, help='the path to save profiling data' +over_dump_path type=str, help='the path to save over dump data' +data_dump_path type=str, help='the path to save dump data' +use_mixlist type=ast.literal_eval,help='use_mixlist flag, default is False' +fusion_off_flag type=ast.literal_eval,help='fusion_off flag, default is False' +mixlist_file type=str,help='mixlist file name, default is ops_info.json' +fusion_off_file type=str,help='fusion_off file name, default is fusion_switch.cfg' +auto_tune help='auto_tune flag, default is False' +``` + +## 训练过程 + +通过“模型训练”中的训练指令启动单卡训练。 +将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。 diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md new file mode 100644 index 000000000..b0e12437f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/README_BAK.md @@ -0,0 +1,193 @@ +# Customizing the convolution operation of a Conv2D layer + +**Author:** [lukewood](https://lukewood.xyz)
+**Date created:** 11/03/2021<br>
+**Last modified:** 11/03/2021<br>
+**Description:** This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API. + + + [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/subclassing_conv_layers.ipynb) [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/subclassing_conv_layers.py) + + + +--- +## Introduction + +You may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`. +Keras enables you do this without implementing the entire layer from scratch: you can reuse +most of the base convolution layer and just customize the convolution op itself via the +`convolution_op()` method. + +This method was introduced in Keras 2.7. So before using the +`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater. + + +```python +import tensorflow.keras as keras + +print(keras.__version__) +``` + +
```
2.7.0

```
+--- +## A Simple `StandardizedConv2D` implementation + +There are two ways to use the `Conv.convolution_op()` API. The first way +is to override the `convolution_op()` method on a convolution layer subclass. +Using this approach, we can quickly implement a +[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below. + + +```python +import tensorflow as tf +import tensorflow.keras as keras +import keras.layers as layers +import numpy as np + + +class StandardizedConv2DWithOverride(layers.Conv2D): + def convolution_op(self, inputs, kernel): + mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True) + return tf.nn.conv2d( + inputs, + (kernel - mean) / tf.sqrt(var + 1e-10), + padding="VALID", + strides=list(self.strides), + name=self.__class__.__name__, + ) + +``` + +The other way to use the `Conv.convolution_op()` API is to directly call the +`convolution_op()` method from the `call()` method of a convolution layer subclass. +A comparable class implemented using this approach is shown below. + + +```python + +class StandardizedConv2DWithCall(layers.Conv2D): + def call(self, inputs): + mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True) + result = self.convolution_op( + inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10) + ) + if self.use_bias: + result = result + self.bias + return result + +``` + +--- +## Example Usage + +Both of these layers work as drop-in replacements for `Conv2D`. The following +demonstration performs classification on the MNIST dataset. + + +```python +# Model / data parameters +num_classes = 10 +input_shape = (28, 28, 1) + +# the data, split between train and test sets +(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + +# Scale images to the [0, 1] range +x_train = x_train.astype("float32") / 255 +x_test = x_test.astype("float32") / 255 +# Make sure images have shape (28, 28, 1) +x_train = np.expand_dims(x_train, -1) +x_test = np.expand_dims(x_test, -1) +print("x_train shape:", x_train.shape) +print(x_train.shape[0], "train samples") +print(x_test.shape[0], "test samples") + +# convert class vectors to binary class matrices +y_train = keras.utils.to_categorical(y_train, num_classes) +y_test = keras.utils.to_categorical(y_test, num_classes) + +model = keras.Sequential( + [ + keras.layers.InputLayer(input_shape=input_shape), + StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation="softmax"), + ] +) + +model.summary() +``` + +```python +batch_size = 128 +epochs = 5 + +model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) + +model.fit(x_train, y_train, batch_size=batch_size, epochs=5, validation_split=0.1) +``` +
+``` +x_train shape: (60000, 28, 28, 1) +60000 train samples +10000 test samples +Model: "sequential" +_________________________________________________________________ + Layer (type) Output Shape Param # +================================================================= + standardized_conv2d_with_ca (None, 26, 26, 32) 320 + ll (StandardizedConv2DWithC + all) + + max_pooling2d (MaxPooling2D (None, 13, 13, 32) 0 + ) + + standardized_conv2d_with_ov (None, 11, 11, 64) 18496 + erride (StandardizedConv2DW + ithOverride) + + max_pooling2d_1 (MaxPooling (None, 5, 5, 64) 0 + 2D) + + flatten (Flatten) (None, 1600) 0 + + dropout (Dropout) (None, 1600) 0 + + dense (Dense) (None, 10) 16010 + +================================================================= +Total params: 34,826 +Trainable params: 34,826 +Non-trainable params: 0 +_________________________________________________________________ + +Epoch 1/5 +422/422 [==============================] - 7s 15ms/step - loss: 1.8435 - accuracy: 0.8415 - val_loss: 0.1177 - val_accuracy: 0.9660 +Epoch 2/5 +422/422 [==============================] - 6s 14ms/step - loss: 0.2460 - accuracy: 0.9338 - val_loss: 0.0727 - val_accuracy: 0.9772 +Epoch 3/5 +422/422 [==============================] - 6s 14ms/step - loss: 0.1600 - accuracy: 0.9541 - val_loss: 0.0537 - val_accuracy: 0.9862 +Epoch 4/5 +422/422 [==============================] - 6s 14ms/step - loss: 0.1264 - accuracy: 0.9633 - val_loss: 0.0509 - val_accuracy: 0.9845 +Epoch 5/5 +422/422 [==============================] - 6s 14ms/step - loss: 0.1090 - accuracy: 0.9679 - val_loss: 0.0457 - val_accuracy: 0.9872 + + + +``` +
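
As a quick sanity check (not part of the original example), the held-out test split prepared above can be evaluated after training:

```python
# Evaluate on the test set prepared earlier in this example
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
```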
+--- +## Conclusion + +The `Conv.convolution_op()` API provides an easy and readable way to implement custom +convolution layers. A `StandardizedConvolution` implementation using the API is quite +terse, consisting of only four lines of code. diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py new file mode 100644 index 000000000..95f8689ce --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_convert_dropout.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from keras import backend +from keras.utils import control_flow_util +from keras.layers.core import Dropout +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import nn +import npu_ops + +def dropout_call(self, inputs, training=None): + """Make Keras Dropout to execute NPU dropout""" + if training is None: + training = backend.learning_phase() + + def dropped_inputs(): + return npu_ops.dropout( + inputs, + noise_shape=self._get_noise_shape(inputs), + seed=self.seed, + keep_prob=1 - self.rate) + + output = control_flow_util.smart_cond(training, + dropped_inputs, + lambda : array_ops.identity(inputs)) + + return output + +Dropout.call = dropout_call diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py new file mode 100644 index 000000000..fa6f8f211 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/npu_ops.py @@ -0,0 +1,256 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Ops for collective operations implemented using hccl.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numbers +from tensorflow.python.ops import array_ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import ops +from tensorflow.python.eager import context + +from npu_device import gen_npu_ops + + +DEFAULT_GRAPH_SEED = 87654321 +_MAXINT32 = 2**31 - 1 +def LARSV2(input_weight, + input_grad, + weight_decay, + learning_rate, + hyperpara=0.001, + epsilon=0.00001, + use_clip=False, + name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.LARSV2() is not compatible with " + "eager execution.") + + return gen_npu_ops.lars_v2(input_weight=input_weight, + input_grad=input_grad, + weight_decay=weight_decay, + learning_rate=learning_rate, + hyperpara=hyperpara, + epsilon=epsilon, + use_clip=use_clip, + name=name) + + +def _truncate_seed(seed): + return seed % _MAXINT32 # Truncate to fit into 32-bit integer + +def get_seed(op_seed): + global_seed = ops.get_default_graph().seed + + if global_seed is not None: + if op_seed is None: + op_seed = ops.get_default_graph()._last_id + + seeds = _truncate_seed(global_seed), _truncate_seed(op_seed) + else: + if op_seed is not None: + seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed) + else: + seeds = None, None + # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would + # be unexpected since Python docs say nondeterminism is (None, None). + if seeds == (0, 0): + return (0, _MAXINT32) + return seeds + +def _get_noise_shape(x, noise_shape): + # If noise_shape is none return immediately. + if noise_shape is None: + return array_ops.shape(x) + + try: + # Best effort to figure out the intended shape. + # If not possible, let the op to handle it. + # In eager mode exception will show up. 
+ noise_shape_ = tensor_shape.as_shape(noise_shape) + except (TypeError, ValueError): + return noise_shape + + if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims): + new_dims = [] + for i, dim in enumerate(x.shape.dims): + if noise_shape_.dims[i].value is None and dim.value is not None: + new_dims.append(dim.value) + else: + new_dims.append(noise_shape_.dims[i].value) + return tensor_shape.TensorShape(new_dims) + + return noise_shape + +def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): + """The gradient for `gelu`. + + Args: + x: A tensor with type is float. + keep_prob: A tensor, float, rate of every element reserved. + noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random + generated. + seed: Random seed. + name: Layer name. + + Returns: + A tensor. + """ + if context.executing_eagerly(): + raise RuntimeError("tf.dropout() is not compatible with " + "eager execution.") + x = ops.convert_to_tensor(x, name="x") + if not x.dtype.is_floating: + raise ValueError("x has to be a floating point tensor since it's going to" + " be scaled. Got a %s tensor instead." % x.dtype) + if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1: + raise ValueError("keep_prob must be a scalar tensor or a float in the " + "range (0, 1], got %g" % keep_prob) + if isinstance(keep_prob, float) and keep_prob == 1: + return x + seed, seed2 = get_seed(seed) + noise_shape = _get_noise_shape(x, noise_shape) + gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name) + result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name) + return result + +@ops.RegisterGradient("DropOutDoMask") +def _DropOutDoMaskGrad(op, grad): + result = gen_npu_ops.drop_out_do_mask(grad, op.inputs[1], op.inputs[2]) + return [result, None, None] + +def basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple, + activation, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.basic_lstm_cell() is not compatible with " + "eager execution.") + x = ops.convert_to_tensor(x, name="x") + h = ops.convert_to_tensor(h, name="h") + c = ops.convert_to_tensor(c, name="c") + w = ops.convert_to_tensor(w, name="w") + b = ops.convert_to_tensor(b, name="b") + result = gen_npu_ops.basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple, + activation, name) + return result + +@ops.RegisterGradient("BasicLSTMCell") +def basic_lstm_cell_grad(op, dct, dht, dit, djt, dft, dot, dtanhct): + + dgate, dct_1 = gen_npu_ops.basic_lstm_cell_c_state_grad(op.inputs[2], dht, dct, op.outputs[2], op.outputs[3], op.outputs[4], op.outputs[5], op.outputs[6], forget_bias=op.get_attr("forget_bias"), activation=op.get_attr("activation")) + dw, db = gen_npu_ops.basic_lstm_cell_weight_grad(op.inputs[0], op.inputs[1], dgate) + dxt, dht = gen_npu_ops.basic_lstm_cell_input_grad(dgate, op.inputs[3], keep_prob=op.get_attr("keep_prob")) + + return [dxt, dht, dct_1, dw, db] + +def adam_apply_one_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.adam_apply_one_assign() is not compatible with " + "eager execution.") + result = gen_npu_ops.adam_apply_one_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, add2_y,name) + return result + +def adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name=None): + if context.executing_eagerly(): + raise 
RuntimeError("tf.adam_apply_one_with_decay_assign() is not compatible with " + "eager execution.") + result = gen_npu_ops.adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name) + return result + +@ops.RegisterGradient("DynamicGruV2") +def dynamic_gru_v2_grad(op, dy, doutput_h, dupdate, dreset, dnew, dhidden_new): + (x, weight_input, weight_hidden, bias_input, bias_hidden, seq_length, init_h) = op.inputs + (y, output_h, update, reset, new, hidden_new) = op.outputs + (dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev) = gen_npu_ops.dynamic_gru_v2_grad(x, weight_input, weight_hidden, y, init_h, output_h, dy, doutput_h, update, reset, new, hidden_new, direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), gate_order=op.get_attr("gate_order"), reset_after=op.get_attr("reset_after")) + + return (dx, dw_input, dw_hidden, db_input, db_hidden, seq_length, dh_prev) + +@ops.RegisterGradient("DynamicRnn") +def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): + (x, w, b, seq_length, init_h, init_c) = op.inputs + (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs + (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, cell_type=op.get_attr("cell_type"), direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), use_peephole=op.get_attr("use_peephole"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), forget_bias=op.get_attr("forget_bias")) + + return (dx, dw, db, seq_length, dh_prev, dc_prev) + +def lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x, + mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.lamb_apply_optimizer_assign() is not compatible with eager execution") + update,nextv,nextm=gen_npu_ops.lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x, + mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name) + return update,nextv,nextm + +def lamb_apply_weight_assign(input0,input1,input2,input3,input4,name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.lamb_apply_weight_assign() is not compatible with eager execution") + result = gen_npu_ops.lamb_apply_weight_assign(input0,input1,input2,input3,input4,name) + return result + +def dropout_v3(x, keep_prob, noise_shape=None, seed=None, name=None): + """ The gradient for gelu + + Args: + x: A tensor with type is float + keep_prob: A tensor, float, rate of every element reserved + noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random generated. + seed: Random seed. + name: Layer name. + + Returns: + A tensor. + """ + x = ops.convert_to_tensor(x,name="x") + if not x.dtype.is_floating: + raise ValueError("x has to be a floating point tensor since it's going to be scaled. Got a %s tensor instead." 
% x.dtype) + + if isinstance(keep_prob,numbers.Real) and not 0 < keep_prob <=1: + raise ValueError("Keep_prob must be a scalar tensor or a float in the range (0,1], got %g" % keep_prob) + + if isinstance(keep_prob,float) and keep_prob==1: + return x + + seed, seed2 = get_seed(seed) + noise_shape = _get_noise_shape(x,noise_shape) + gen_out = gen_npu_ops.drop_out_gen_mask_v3(noise_shape,keep_prob,seed,seed2,name) + result = gen_npu_ops.drop_out_do_mask_v3(x, gen_out, keep_prob, name) + return result + +@ops.RegisterGradient("DropOutDoMaskV3") +def _DropOutDoMaskV3Grad(op,grad): + result = gen_npu_ops.drop_out_do_mask_v3(grad, op.inputs[1], op.inputs[2]) + return [result, None, None] \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..037077e65 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/requirements.txt @@ -0,0 +1,13 @@ +pygments>=2.7.4 +jinja2 +markdown +requests +mdx_truly_sane_lists +sphinx~=3.0.3 +black==19.10b0 +pathlib +tensorflow +PyYAML +jupyter +keras +pandas \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh new file mode 100644 index 000000000..21876811d --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/run_1p.sh @@ -0,0 +1,3 @@ +#!/bin/bash +data_path="" +nohup python3 subclassing_conv_layers.py --epochs=2 --batch_size=256 --data_path=$data_path >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py new file mode 100644 index 000000000..93d344533 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/subclassing_conv_layers.py @@ -0,0 +1,310 @@ +""" +Title: Customizing the convolution operation of a Conv2D layer +Author: [lukewood](https://lukewood.xyz) +Date created: 11/03/2021 +Last modified: 11/03/2021 +Description: This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API. +""" +""" +## Introduction + +You may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`. +Keras enables you do this without implementing the entire layer from scratch: you can reuse +most of the base convolution layer and just customize the convolution op itself via the +`convolution_op()` method. + +This method was introduced in Keras 2.7. So before using the +`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater. +""" +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import tensorflow.keras as keras + +print(keras.__version__) +""" +## A Simple `StandardizedConv2D` implementation + +There are two ways to use the `Conv.convolution_op()` API. The first way +is to override the `convolution_op()` method on a convolution layer subclass. +Using this approach, we can quickly implement a +[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below. +""" +import tensorflow as tf +import tensorflow.keras as keras +import keras.layers as layers +import numpy as np +from time import time +import npu_device +import os +import time +from absl import flags, app +import npu_convert_dropout + +# npu_device.open().as_default() + +flags.DEFINE_string(name='data_path', default='/home/hzh/involution/cifar-10-batches-py', + help='dataset path(local)') +flags.DEFINE_integer(name='epochs', default=5, help='training epochs') +flags.DEFINE_integer(name='batch_size', default=128, help='training batch_size') +flags.DEFINE_boolean(name='save_h5', default=True, help='whether save h5 file after training') +flags.DEFINE_integer(name='log_steps', default=234, help='training epochs') +flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16', + help='allow_fp32_to_fp16/force_fp16/ ' + 'must_keep_origin_dtype/allow_mix_precision.') +flags.DEFINE_boolean(name='over_dump', default=False, + help='if or not over detection, default is False') +flags.DEFINE_boolean(name='data_dump_flag', default=False, + help='data dump flag, default is False') +flags.DEFINE_string(name='data_dump_step', default="10", + help='data dump step, default is 10') +flags.DEFINE_boolean(name='profiling', default=False, + help='if or not profiling for performance debug, default is False') +flags.DEFINE_string(name='profiling_dump_path', default="/home/data", + help='the path to save profiling data') +flags.DEFINE_string(name='over_dump_path', default="/home/data", + help='the path to save over dump data') +flags.DEFINE_string(name='data_dump_path', default="/home/data", + help='the path to save dump data') +flags.DEFINE_boolean(name='use_mixlist', default=False, + help='whether to enable mixlist, default is True') +flags.DEFINE_boolean(name='fusion_off_flag', default=False, + help='whether to enable mixlist, default is True') +flags.DEFINE_string(name='mixlist_file', default='ops_info.json', + help='mixlist file name, default is ops_info.json') +flags.DEFINE_string(name='fusion_off_file', 
default='fusion_switch.cfg', + help='fusion_off file name, default is fusion_switch.cfg') +flags.DEFINE_boolean(name='auto_tune', default=False, + help='auto_tune flag, default is False') +flags.DEFINE_integer(name='static', default=0, + help='static, default is 0') +FLAGS = flags.FLAGS + +def npu_config(): + + + npu_config = {} + + if FLAGS.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = FLAGS.data_dump_path + npu_device.global_options().dump_config.dump_step = FLAGS.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if FLAGS.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = FLAGS.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if FLAGS.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode=FLAGS.precision_mode + if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=FLAGS.mixlist_file + if FLAGS.fusion_off_flag: + npu_device.global_options().fusion_switch_file=FLAGS.fusion_off_file + if FLAGS.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + + +def task(_): + class StandardizedConv2DWithOverride(layers.Conv2D): + def convolution_op(self, inputs, kernel): + mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True) + return tf.nn.conv2d( + inputs, + (kernel - mean) / tf.sqrt(var + 1e-10), + padding="VALID", + 
strides=list(self.strides), + name=self.__class__.__name__, + ) + + + """ + The other way to use the `Conv.convolution_op()` API is to directly call the + `convolution_op()` method from the `call()` method of a convolution layer subclass. + A comparable class implemented using this approach is shown below. + """ + + + class StandardizedConv2DWithCall(layers.Conv2D): + def convolution_op(self, inputs, kernel): + mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True) + return tf.nn.conv2d( + inputs, + (kernel - mean) / tf.sqrt(var + 1e-10), + padding="VALID", + strides=list(self.strides), + name=self.__class__.__name__, + ) + + def call(self, inputs): + mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True) + result = self.convolution_op( + inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10) + ) + if self.use_bias: + result = result + self.bias + return result + + + """ + ## Example Usage + + Both of these layers work as drop-in replacements for `Conv2D`. The following + demonstration performs classification on the MNIST dataset. + """ + + npu_config() + + # Model / data parameters + num_classes = 10 + input_shape = (28, 28, 1) + batch_size = FLAGS.batch_size + epochs = FLAGS.epochs + # the data, split between train and test sets + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(os.path.join(FLAGS.data_path, 'mnist.npz')) + + # Scale images to the [0, 1] range + x_train = x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + print("x_train shape:", x_train.shape) + print(x_train.shape[0], "train samples") + print(x_test.shape[0], "test samples") + + # convert class vectors to binary class matrices + y_train = keras.utils.to_categorical(y_train, num_classes) + y_test = keras.utils.to_categorical(y_test, num_classes) + if FLAGS.static==1: + train_ds = ( + tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .batch(batch_size, drop_remainder=True)) + else: + train_ds = ( + tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .batch(batch_size, drop_remainder=False)) + train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE) + model = keras.Sequential( + [ + keras.layers.InputLayer(input_shape=input_shape), + StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation="softmax"), + ] + ) + + model.summary() + """ + + """ + + + model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) + callbacks = [TimeHistory(batch_size,FLAGS.log_steps)] + #start_time = time() + #model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1, verbose=2) + model.fit(train_ds, batch_size=batch_size, epochs=epochs, verbose=2, callbacks=callbacks) + #end_time = time() + #time_s = end_time - start_time + #print("TrainingTime: ", time_s) + + if FLAGS.save_h5: + model.save("model.h5") + """ + ## Conclusion + + The `Conv.convolution_op()` API provides an easy and readable way to implement custom + convolution layers. A `StandardizedConvolution` implementation using the API is quite + terse, consisting of only four lines of code. 
+ """ + + +if __name__ == '__main__': + app.run(task) diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..278486c1a --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,167 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=256 +#网络名称,同目录名称 +Network="subclassing_conv_layers_ID2615_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=5 +#训练step +#train_steps=50000 +#学习率 +# learning_rate=0.001 +# weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_full_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 subclassing_conv_layers.py \ + --data_path=$data_path \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --auto_tune=${auto_tune} \ + --fusion_off_file=${fusion_off_file} \ + 
--profiling_dump_path=${profiling_dump_path}>$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +#FPS=`awk 'BEGIN{printf "%.2f\n",'211'*'${batch_size}'/'${TrainingTime}'}'` +TrainingTime=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +FPS=`grep TimeHistory: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $4}'|tail -1` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$(NF-0)}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..5a8035e4f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,169 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=256 +#网络名称,同目录名称 +Network="subclassing_conv_layers_ID2615_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=2 +#训练step +#train_steps=50000 +#学习率 +# learning_rate=0.001 +# weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi 
+data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 subclassing_conv_layers.py \ + --data_path=$data_path \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --auto_tune=${auto_tune} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --static=0 \ + --log_steps=235 >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep 235/235 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +#FPS=`awk 'BEGIN{printf "%.2f\n",'235'*'${batch_size}'/'${TrainingTime}'}'` +FPS=`grep TimeHistory: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $4}'|tail -1` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$(NF-0)}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 
+ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..25a5b597b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/subclassing_conv_layers_ID2615_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,169 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=256 +#网络名称,同目录名称 +Network="subclassing_conv_layers_ID2615_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=3 +#训练step +#train_steps=50000 +#学习率 +# learning_rate=0.001 +# weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == 
--data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 subclassing_conv_layers.py \ + --data_path=$data_path \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --auto_tune=${auto_tune} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --static=1 \ + --log_steps=234>$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep 234/ $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +#FPS=`awk 'BEGIN{printf "%.2f\n",'234'*'${batch_size}'/'${TrainingTime}'}'` +FPS=`grep TimeHistory: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $4}'|tail -1` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$(NF-0)}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From 742d749d99f38343d5d463cd7153e749f30f28eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 03:21:38 +0000 Subject: [PATCH 03/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/.keep?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/.keep diff --git a/TensorFlow2/built-in/keras_sample/.keep b/TensorFlow2/built-in/keras_sample/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From 5b9a7ed71a8460b787b774d4998e7ef7148a0f0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:15:07 +0000 Subject: [PATCH 04/54] =?UTF-8?q?zero=5Fdce=5FID2548=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../zero_dce_ID2548_for_TensorFlow2.X/LICENSE | 21 + .../README.md | 197 ++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../test/train_full_1p.sh | 231 +++++++ .../test/train_performance_1p.sh | 230 +++++++ .../zero_dce.py | 609 ++++++++++++++++++ 7 files changed, 1291 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..51d555a15 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Ke YU + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..350ffb972 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/README.md @@ -0,0 +1,197 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) +
+<h2 id="基本信息.md">基本信息</h2>
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Image Enhancement**
+
+**版本(Version):1.1**
+
+**修改时间(Modified):2022.4.8**
+
+**大小(Size):324KB**
+
+**框架(Framework):TensorFlow_2.4.1**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Official**
+
+**描述(Description):基于TensorFlow框架的低光照图像增强(Zero-DCE)网络训练代码**
+
+<h2 id="概述.md">概述</h2>
+
+## 简述
+
+Zero-DCE将低光照图像增强建模为用深度网络估计图像专属色调曲线的任务:DCE-Net以低光照图像为输入,输出逐像素的高阶曲线参数图,再用这些曲线对输入的动态范围做逐像素调整,得到增强后的图像。训练过程不需要成对的参考图像,而是通过一组非参考损失函数来度量增强质量。
+
+- 参考论文:
+
+    https://arxiv.org/abs/2001.06826
+
+- 参考实现:
+
+    https://keras.io/examples/vision/zero_dce/
+
+- 适配昇腾 AI 处理器的实现:
+
+    skip
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+        git clone {repository_url}        # 克隆仓库的代码
+        cd {repository_name}              # 切换到模型的代码仓目录
+        git checkout {branch}             # 切换到对应分支
+        git reset --hard {commit_id}      # 代码设置到对应的commit_id
+        cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+
+
+## 默认配置
+- 网络结构
+    - DCE-Net:7层对称级联的全卷积网络,每层32个3×3卷积核(stride 1)并接ReLU,最后一层接Tanh,输出24张曲线参数图(8次迭代×RGB三通道)
+
+- 训练超参(单卡):
+    - Batch size: 16
+    - Train epoch: 100
+    - Learning rate: 1e-4
+
+
+## 支持特性
+
+| 特性列表   | 是否支持 |
+| ---------- | -------- |
+| 分布式训练 | 否       |
+| 混合精度   | 是       |
+| 数据并行   | 否       |
+
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+拉起脚本中,传入--precision_mode='allow_mix_precision'
+
+```
+./train_full_1p.sh --help
+
+parameter explain:
+    --precision_mode            precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump                 if or not over detection, default is False
+    --data_dump_flag            data dump flag, default is False
+    --data_dump_step            data dump step, default is 10
+    --profiling                 if or not profiling for performance debug, default is False
+    --data_path                 source data of training
+    -h/--help                   show help message
+```
+
+相关代码示例(与zero_dce.py中的argparse实现保持一致):
+
+```
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='allow_fp32_to_fp16/force_fp16/'
+                         'must_keep_origin_dtype/allow_mix_precision')
+
+npu_device.global_options().precision_mode = args.precision_mode
+```
+
+<h2 id="训练环境准备.md">训练环境准备</h2>
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt位于模型的根目录。
+
+<h2 id="快速上手.md">快速上手</h2>
+
+## 数据集准备
+
+1、用户自行准备好数据集。使用的数据集是lol_dataset。
+
+数据集目录参考如下:
+
+```
+├── lol_dataset
+│   ├──eval15
+│   │   ├──high
+│   │   │   ├──......
+│   │   ├──low
+│   │   │   ├──......
+│   ├──our485
+│   │   ├──high
+│   │   │   ├──......
+│   │   ├──low
+│   │   │   ├──......
+```
+
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+
+- 开始训练。
+
+    1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+       环境变量配置信息参见:
+
+       [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. 单卡训练
+
+       2.1 单卡训练指令(脚本位于zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh),其中“--data_path”修改为数据集的路径,拉起示例见下文。
+
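+以下是一个假设性的拉起示例(仅供参考:假设lol_dataset数据集已解压到“/data”目录下;train_full_1p.sh内部会自动拼接“lol_dataset”子目录):
+
+```
+cd zero_dce_ID2548_for_TensorFlow2.X/test
+bash train_full_1p.sh --data_path=/data
+```
+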
+<h2 id="高级参考.md">高级参考</h2>
+ +## 脚本和示例代码 + +``` +|--test #训练脚本目录 +| |--train_full_1p.sh +| |--train_performance_1p.sh +| |--...... +|--zero_dce.py +|--...... +``` + +## 脚本参数 + +``` + --batch_size Total batch size for training,default:16 + --epochs epochs ,default:100 + --learning_rate learning_rate,default:1e-4 + --data_path data_path,default:./lol_dataset + --log_steps steps per log,default:1e-4 + --precision_mode the path to save over dump data,default:allow_mix_precision + --over_dump if or not over detection,default:False + --data_dump_flag data dump flag, default:False + --data_dump_step data dump step, default:10 + --profiling profiling,default:False + --profiling_dump_path profiling_dump_path,default:/home/data + --over_dump_path over_dump_path,default:/home/data + --data_dump_path data_dump_path,default:/home/data + --use_mixlist use_mixlist flag,default:False + --fusion_off_flag fusion_off flag,default:False + --mixlist_file mixlist file name,default:ops_info.json + --fusion_off_file fusion_off_file,default:100 + --auto_tune auto_tune flag, default:False +``` + +## 训练过程 + +通过“模型训练”中的训练指令启动单卡训练。 +将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。 + + + diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..9f9b36084 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:PERFECT +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..80ef963c3 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,231 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=0 + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="zero_dce_ID2548_for_TensorFlow2.X" +#训练epoch +train_epochs=100 +#训练batch_size +batch_size=16 + +# #维测参数,precision_mode需要模型审视修改 +# precision_mode="allow_mix_precision" +# #维持参数,以下不需要修改 +# over_dump=False +# data_dump_flag=False +# data_dump_step="10" +# profiling=False +# autotune=False + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" +auto_tune=False +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + 
--data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + --auto_tune if or not auto_tune, default is False + -h/--help show help message + " + exit 1 +fi + +# #参数校验,不需要修改 +# for para in $* +# do +# if [[ $para == --precision_mode* ]];then +# precision_mode=`echo ${para#*=}` +# elif [[ $para == --over_dump* ]];then +# over_dump=`echo ${para#*=}` +# over_dump_path=${cur_path}/output/overflow_dump +# mkdir -p ${over_dump_path} +# elif [[ $para == --data_dump_flag* ]];then +# data_dump_flag=`echo ${para#*=}` +# data_dump_path=${cur_path}/output/data_dump +# mkdir -p ${data_dump_path} +# elif [[ $para == --data_dump_step* ]];then +# data_dump_step=`echo ${para#*=}` +# elif [[ $para == --profiling* ]];then +# profiling=`echo ${para#*=}` +# profiling_dump_path=${cur_path}/output/profiling +# mkdir -p ${profiling_dump_path} +# elif [[ $para == --data_path* ]];then +# data_path=`echo ${para#*=}` +# fi +# done + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done +############维测参数############## + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + python3 zero_dce.py \ + --data_path=$data_path/lol_dataset/ \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --auto_tune=${auto_tune} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate 
+#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +# Time=`grep "ms/step" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n 1 | awk -F'-' '{print $2}' | awk -F' ' '{print $2}' | awk -F'ms' '{print $1}'` +# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'` +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=null +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#稳定性精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据 +#吞吐量,不需要修改 +ActualFPS=${FPS} +#单迭代训练时长,不需要修改 +# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +# cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}'>> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep total_loss | awk -F " " '{print $6}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +# ActualLoss=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}' | tail -n 1` +ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +train_accuracy=${ActualLoss} + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..458bef301 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,230 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=0 + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 
+Network="zero_dce_ID2548_for_TensorFlow2.X" +#训练epoch +train_epochs=50 +#训练batch_size +batch_size=16 + +# #维测参数,precision_mode需要模型审视修改 +# precision_mode="allow_mix_precision" +# #维持参数,以下不需要修改 +# over_dump=False +# data_dump_flag=False +# data_dump_step="10" +# profiling=False +# autotune=False + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" +auto_tune=False +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + --auto_tune if or not auto_tune, default is False + -h/--help show help message + " + exit 1 +fi + +# #参数校验,不需要修改 +# for para in $* +# do +# if [[ $para == --precision_mode* ]];then +# precision_mode=`echo ${para#*=}` +# elif [[ $para == --over_dump* ]];then +# over_dump=`echo ${para#*=}` +# over_dump_path=${cur_path}/output/overflow_dump +# mkdir -p ${over_dump_path} +# elif [[ $para == --data_dump_flag* ]];then +# data_dump_flag=`echo ${para#*=}` +# data_dump_path=${cur_path}/output/data_dump +# mkdir -p ${data_dump_path} +# elif [[ $para == --data_dump_step* ]];then +# data_dump_step=`echo ${para#*=}` +# elif [[ $para == --profiling* ]];then +# profiling=`echo ${para#*=}` +# profiling_dump_path=${cur_path}/output/profiling +# mkdir -p ${profiling_dump_path} +# elif [[ $para == --data_path* ]];then +# data_path=`echo ${para#*=}` +# fi +# done + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done +############维测参数############## + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + 
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + python3 zero_dce.py \ + --data_path=$data_path/lol_dataset/ \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --auto_tune=${auto_tune} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +# Time=`grep "ms/step" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n 1 | awk -F'-' '{print $2}' | awk -F' ' '{print $2}' | awk -F'ms' '{print $1}'` +# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'` +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=null +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#稳定性精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据 +#吞吐量,不需要修改 +ActualFPS=${FPS} +#单迭代训练时长,不需要修改 +# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +# cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}'>> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep total_loss | awk -F " " '{print $6}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +# ActualLoss=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo "ms/step - total_loss: [0-9]*\.[0-9]*" | awk -F":" '{print $2}' | tail -n 1` +ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +train_accuracy=${ActualLoss} + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> 
$cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py new file mode 100644 index 000000000..367f3982f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/zero_dce_ID2548_for_TensorFlow2.X/zero_dce.py @@ -0,0 +1,609 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Title: Zero-DCE for low-light image enhancement +Author: [Soumik Rakshit](http://github.com/soumik12345) +Date created: 2021/09/18 +Last modified: 2021/09/19 +Description: Implementing Zero-Reference Deep Curve Estimation for low-light image enhancement. +""" +""" +## Introduction + +**Zero-Reference Deep Curve Estimation** or **Zero-DCE** formulates low-light image +enhancement as the task of estimating an image-specific +[*tonal curve*](https://en.wikipedia.org/wiki/Curve_(tonality)) with a deep neural network. +In this example, we train a lightweight deep network, **DCE-Net**, to estimate +pixel-wise and high-order tonal curves for dynamic range adjustment of a given image. + +Zero-DCE takes a low-light image as input and produces high-order tonal curves as its output. +These curves are then used for pixel-wise adjustment on the dynamic range of the input to +obtain an enhanced image. The curve estimation process is done in such a way that it maintains +the range of the enhanced image and preserves the contrast of neighboring pixels. This +curve estimation is inspired by curves adjustment used in photo editing software such as +Adobe Photoshop where users can adjust points throughout an image’s tonal range. + +Zero-DCE is appealing because of its relaxed assumptions with regard to reference images: +it does not require any input/output image pairs during training. 
+This is achieved through a set of carefully formulated non-reference loss functions,
+which implicitly measure the enhancement quality and guide the training of the network.
+
+### References
+
+- [Zero-Reference Deep Curve Estimation for Low-Light Image Enhancement](https://arxiv.org/pdf/2001.06826.pdf)
+- [Curves adjustment in Adobe Photoshop](https://helpx.adobe.com/photoshop/using/curves-adjustment.html)
+"""
+
+"""
+## Downloading LOLDataset
+
+The **LoL Dataset** has been created for low-light image enhancement. It provides 485
+images for training and 15 for testing. Each image pair in the dataset consists of a
+low-light input image and its corresponding well-exposed reference image.
+"""
+
+import npu_device
+import argparse
+import ast
+#===============================NPU Migration=========================================
+parser = argparse.ArgumentParser()
+parser.add_argument('--batch_size', type=int, default=16, help='batch_size')
+parser.add_argument('--epochs', type=int, default=100, help='epochs')
+parser.add_argument('--learning_rate', type=float, default=1e-4, help='learning_rate')
+parser.add_argument('--data_path', type=str, default='./lol_dataset', help='data path')
+parser.add_argument('--log_steps', type=int, default=25, help='steps per log')
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='precision mode: allow_fp32_to_fp16/force_fp16/'
+                         'must_keep_origin_dtype/allow_mix_precision')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                    help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                    help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+                    help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,
+                    help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str,
+                    help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str,
+                    help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str,
+                    help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                    help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+                    help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str,
+                    help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,
+                    help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,
+                    help='auto_tune flag, default is False')
+args = parser.parse_args()
+
+def npu_config():
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        
profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist="../configs/"+args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file="../configs/"+args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= +npu_config() + +import os +import time +import random +import numpy as np +from glob import glob +from PIL import Image, ImageOps +import matplotlib.pyplot as plt + +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers + +# def init_arg(): +# parser = argparse.ArgumentParser() +# parser.add_argument('--batch_size', type=int, default=16, help='batch_size') +# parser.add_argument('--epochs', type=int, default=100, help='epochs') +# parser.add_argument('--learning_rate', type=int, default=1e-4, help='learning_rate') +# parser.add_argument('--data_path', type=str, default='./lol_dataset', help='data path') +# return parser.parse_args() + + +# args = init_arg() + +"""shell +gdown https://drive.google.com/uc?id=1DdGIJ4PZPlF2ikl8mNM9V-PdVxVLbQi6 +unzip -q lol_dataset.zip +""" + +""" +## Creating a TensorFlow Dataset + +We use 300 low-light images from the LoL Dataset training set for training, and we use +the remaining 185 low-light images for validation. We resize the images to size `256 x +256` to be used for both training and validation. Note that in order to train the DCE-Net, +we will not require the corresponding enhanced images. +""" + +IMAGE_SIZE = 256 +BATCH_SIZE = args.batch_size +MAX_TRAIN_IMAGES = 400 + + +def load_data(image_path): + image = tf.io.read_file(image_path) + image = tf.image.decode_png(image, channels=3) + image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE]) + image = image / 255.0 + return image + + +def data_generator(low_light_images): + dataset = tf.data.Dataset.from_tensor_slices((low_light_images)) + dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE) + dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) + return dataset + + +train_low_light_images = sorted(glob(args.data_path + "/our485/low/*"))[:MAX_TRAIN_IMAGES] +val_low_light_images = sorted(glob(args.data_path + "/our485/low/*"))[MAX_TRAIN_IMAGES:] +test_low_light_images = sorted(glob(args.data_path + "/eval15/low/*")) + + +train_dataset = data_generator(train_low_light_images) +val_dataset = data_generator(val_low_light_images) + +print("Train Dataset:", train_dataset) +print("Validation Dataset:", val_dataset) + +""" +## The Zero-DCE Framework + +The goal of DCE-Net is to estimate a set of best-fitting light-enhancement curves +(LE-curves) given an input image. The framework then maps all pixels of the input’s RGB +channels by applying the curves iteratively to obtain the final enhanced image. 
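+
+As a quick illustration (a minimal sketch, not part of the original example), a single
+curve iteration has the form `x + r * (x**2 - x)`, exactly as implemented in the
+`get_enhanced_image` method further below; the values of `x` and `r` here are toy
+inputs made up for demonstration:
+
+```python
+import numpy as np
+
+x = np.linspace(0.0, 1.0, 5)   # toy pixel intensities in [0, 1]
+r = -0.8 * np.ones_like(x)     # one hypothetical curve parameter map
+for _ in range(8):             # 8 iterations: the 24 DCE-Net maps = 8 steps x 3 channels
+    x = x + r * (np.square(x) - x)
+print(x)                       # outputs stay in [0, 1]; dark inputs are lifted
+```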
+
+### Understanding light-enhancement curves
+
+A light-enhancement curve is a kind of curve that can map a low-light image
+to its enhanced version automatically,
+where the self-adaptive curve parameters are solely dependent on the input image.
+When designing such a curve, three objectives should be taken into account:
+
+- Each pixel value of the enhanced image should be in the normalized range `[0,1]`, in order to
+avoid information loss induced by overflow truncation.
+- It should be monotonic, to preserve the contrast between neighboring pixels.
+- The shape of this curve should be as simple as possible,
+and the curve should be differentiable to allow backpropagation.
+
+The light-enhancement curve is separately applied to three RGB channels instead of solely on the
+illumination channel. The three-channel adjustment can better preserve the inherent color and reduce
+the risk of over-saturation.
+
+![](https://li-chongyi.github.io/Zero-DCE_files/framework.png)
+
+### DCE-Net
+
+The DCE-Net is a lightweight deep neural network that learns the mapping between an input
+image and its best-fitting curve parameter maps. The input to the DCE-Net is a low-light
+image while the outputs are a set of pixel-wise curve parameter maps for corresponding
+higher-order curves. It is a plain CNN of seven convolutional layers with symmetrical
+concatenation. Each layer consists of 32 convolutional kernels of size 3×3 and stride 1
+followed by the ReLU activation function. The last convolutional layer is followed by the
+Tanh activation function, which produces 24 parameter maps for 8 iterations, where each
+iteration requires three curve parameter maps for the three channels.
+
+![](https://i.imgur.com/HtIg34W.png)
+"""
+
+
+def build_dce_net():
+    input_img = keras.Input(shape=[None, None, 3])
+    conv1 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(input_img)
+    conv2 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(conv1)
+    conv3 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(conv2)
+    conv4 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(conv3)
+    int_con1 = layers.Concatenate(axis=-1)([conv4, conv3])
+    conv5 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(int_con1)
+    int_con2 = layers.Concatenate(axis=-1)([conv5, conv2])
+    conv6 = layers.Conv2D(
+        32, (3, 3), strides=(1, 1), activation="relu", padding="same"
+    )(int_con2)
+    int_con3 = layers.Concatenate(axis=-1)([conv6, conv1])
+    x_r = layers.Conv2D(24, (3, 3), strides=(1, 1), activation="tanh", padding="same")(
+        int_con3
+    )
+    return keras.Model(inputs=input_img, outputs=x_r)
+
+
+"""
+## Loss functions
+
+To enable zero-reference learning in DCE-Net, we use a set of differentiable
+zero-reference losses that allow us to evaluate the quality of enhanced images.
+"""
+
+"""
+### Color constancy loss
+
+The *color constancy loss* is used to correct the potential color deviations in the
+enhanced image.
+"""
+
+
+def color_constancy_loss(x):
+    mean_rgb = tf.reduce_mean(x, axis=(1, 2), keepdims=True)
+    mr, mg, mb = mean_rgb[:, :, :, 0], mean_rgb[:, :, :, 1], mean_rgb[:, :, :, 2]
+    d_rg = tf.square(mr - mg)
+    d_rb = tf.square(mr - mb)
+    d_gb = tf.square(mb - mg)
+    return tf.sqrt(tf.square(d_rg) + tf.square(d_rb) + tf.square(d_gb))
+
+
+"""
+### Exposure loss
+
+To restrain under-/over-exposed regions, we use the *exposure control loss*.
+It measures the distance between the average intensity value of a local region +and a preset well-exposedness level (set to `0.6`). +""" + + +def exposure_loss(x, mean_val=0.6): + x = tf.reduce_mean(x, axis=3, keepdims=True) + mean = tf.nn.avg_pool2d(x, ksize=16, strides=16, padding="VALID") + return tf.reduce_mean(tf.square(mean - mean_val)) + + +""" +### Illumination smoothness loss + +To preserve the monotonicity relations between neighboring pixels, the +*illumination smoothness loss* is added to each curve parameter map. +""" + + +def illumination_smoothness_loss(x): + batch_size = tf.shape(x)[0] + h_x = tf.shape(x)[1] + w_x = tf.shape(x)[2] + count_h = (tf.shape(x)[2] - 1) * tf.shape(x)[3] + count_w = tf.shape(x)[2] * (tf.shape(x)[3] - 1) + h_tv = tf.reduce_sum(tf.square((x[:, 1:, :, :] - x[:, : h_x - 1, :, :]))) + w_tv = tf.reduce_sum(tf.square((x[:, :, 1:, :] - x[:, :, : w_x - 1, :]))) + batch_size = tf.cast(batch_size, dtype=tf.float32) + count_h = tf.cast(count_h, dtype=tf.float32) + count_w = tf.cast(count_w, dtype=tf.float32) + return 2 * (h_tv / count_h + w_tv / count_w) / batch_size + + +""" +### Spatial consistency loss + +The *spatial consistency loss* encourages spatial coherence of the enhanced image by +preserving the contrast between neighboring regions across the input image and its enhanced version. +""" + + +class SpatialConsistencyLoss(keras.losses.Loss): + def __init__(self, **kwargs): + super(SpatialConsistencyLoss, self).__init__(reduction="none") + + self.left_kernel = tf.constant( + [[[[0, 0, 0]], [[-1, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32 + ) + self.right_kernel = tf.constant( + [[[[0, 0, 0]], [[0, 1, -1]], [[0, 0, 0]]]], dtype=tf.float32 + ) + self.up_kernel = tf.constant( + [[[[0, -1, 0]], [[0, 1, 0]], [[0, 0, 0]]]], dtype=tf.float32 + ) + self.down_kernel = tf.constant( + [[[[0, 0, 0]], [[0, 1, 0]], [[0, -1, 0]]]], dtype=tf.float32 + ) + + def call(self, y_true, y_pred): + + original_mean = tf.reduce_mean(y_true, 3, keepdims=True) + enhanced_mean = tf.reduce_mean(y_pred, 3, keepdims=True) + original_pool = tf.nn.avg_pool2d( + original_mean, ksize=4, strides=4, padding="VALID" + ) + enhanced_pool = tf.nn.avg_pool2d( + enhanced_mean, ksize=4, strides=4, padding="VALID" + ) + + d_original_left = tf.nn.conv2d( + original_pool, self.left_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + d_original_right = tf.nn.conv2d( + original_pool, self.right_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + d_original_up = tf.nn.conv2d( + original_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + d_original_down = tf.nn.conv2d( + original_pool, self.down_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + + d_enhanced_left = tf.nn.conv2d( + enhanced_pool, self.left_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + d_enhanced_right = tf.nn.conv2d( + enhanced_pool, self.right_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + d_enhanced_up = tf.nn.conv2d( + enhanced_pool, self.up_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + d_enhanced_down = tf.nn.conv2d( + enhanced_pool, self.down_kernel, strides=[1, 1, 1, 1], padding="SAME" + ) + + d_left = tf.square(d_original_left - d_enhanced_left) + d_right = tf.square(d_original_right - d_enhanced_right) + d_up = tf.square(d_original_up - d_enhanced_up) + d_down = tf.square(d_original_down - d_enhanced_down) + return d_left + d_right + d_up + d_down + + +""" +### Deep curve estimation model + +We implement the Zero-DCE framework as a Keras subclassed model. 
+""" + + +class ZeroDCE(keras.Model): + def __init__(self, **kwargs): + super(ZeroDCE, self).__init__(**kwargs) + self.dce_model = build_dce_net() + + def compile(self, learning_rate, **kwargs): + super(ZeroDCE, self).compile(**kwargs) + self.optimizer = keras.optimizers.Adam(learning_rate=learning_rate) + self.spatial_constancy_loss = SpatialConsistencyLoss(reduction="none") + + def get_enhanced_image(self, data, output): + r1 = output[:, :, :, :3] + r2 = output[:, :, :, 3:6] + r3 = output[:, :, :, 6:9] + r4 = output[:, :, :, 9:12] + r5 = output[:, :, :, 12:15] + r6 = output[:, :, :, 15:18] + r7 = output[:, :, :, 18:21] + r8 = output[:, :, :, 21:24] + x = data + r1 * (tf.square(data) - data) + x = x + r2 * (tf.square(x) - x) + x = x + r3 * (tf.square(x) - x) + enhanced_image = x + r4 * (tf.square(x) - x) + x = enhanced_image + r5 * (tf.square(enhanced_image) - enhanced_image) + x = x + r6 * (tf.square(x) - x) + x = x + r7 * (tf.square(x) - x) + enhanced_image = x + r8 * (tf.square(x) - x) + return enhanced_image + + def call(self, data): + dce_net_output = self.dce_model(data) + return self.get_enhanced_image(data, dce_net_output) + + def compute_losses(self, data, output): + enhanced_image = self.get_enhanced_image(data, output) + loss_illumination = 200 * illumination_smoothness_loss(output) + loss_spatial_constancy = tf.reduce_mean( + self.spatial_constancy_loss(enhanced_image, data) + ) + loss_color_constancy = 5 * tf.reduce_mean(color_constancy_loss(enhanced_image)) + loss_exposure = 10 * tf.reduce_mean(exposure_loss(enhanced_image)) + total_loss = ( + loss_illumination + + loss_spatial_constancy + + loss_color_constancy + + loss_exposure + ) + return { + "total_loss": total_loss, + "illumination_smoothness_loss": loss_illumination, + "spatial_constancy_loss": loss_spatial_constancy, + "color_constancy_loss": loss_color_constancy, + "exposure_loss": loss_exposure, + } + + def train_step(self, data): + with tf.GradientTape() as tape: + output = self.dce_model(data) + losses = self.compute_losses(data, output) + gradients = tape.gradient( + losses["total_loss"], self.dce_model.trainable_weights + ) + self.optimizer.apply_gradients(zip(gradients, self.dce_model.trainable_weights)) + return losses + + def test_step(self, data): + output = self.dce_model(data) + return self.compute_losses(data, output) + + def save_weights(self, filepath, overwrite=True, save_format=None, options=None): + """While saving the weights, we simply save the weights of the DCE-Net""" + self.dce_model.save_weights( + filepath, overwrite=overwrite, save_format=save_format, options=options + ) + + def load_weights(self, filepath, by_name=False, skip_mismatch=False, options=None): + """While loading the weights, we simply load the weights of the DCE-Net""" + self.dce_model.load_weights( + filepath=filepath, + by_name=by_name, + skip_mismatch=skip_mismatch, + options=options, + ) + +""" +## Add time history callbacks +""" + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = 
time.time()
+
+    def on_batch_begin(self, batch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+
+    def on_batch_end(self, batch, logs=None):
+        self.steps_in_epoch = batch + 1
+        steps_since_last_log = self.global_steps - self.last_log_step
+        if steps_since_last_log >= self.log_steps:
+            now = time.time()
+            elapsed_time = now - self.start_time
+            steps_per_second = steps_since_last_log / elapsed_time
+            examples_per_second = steps_per_second * self.batch_size
+            print(
+                'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
+                'and %d'%(elapsed_time, examples_per_second, self.last_log_step,
+                self.global_steps),flush=True)
+            self.last_log_step = self.global_steps
+            self.start_time = None
+
+    def on_epoch_end(self, epoch, logs=None):
+        epoch_run_time = time.time() - self.epoch_start
+        self.steps_before_epoch += self.steps_in_epoch
+        self.steps_in_epoch = 0
+
+"""
+## Training
+"""
+
+ckpt_path = './ckpt/checkpoint'
+zero_dce_model = ZeroDCE()
+zero_dce_model.compile(learning_rate=args.learning_rate)
+history = zero_dce_model.fit(train_dataset, validation_data=val_dataset, epochs=args.epochs, verbose=2, callbacks=[TimeHistory(args.batch_size,args.log_steps)],)
+zero_dce_model.save_weights(ckpt_path)
+
+
+#def plot_result(item):
+#    plt.plot(history.history[item], label=item)
+#    plt.plot(history.history["val_" + item], label="val_" + item)
+#    plt.xlabel("Epochs")
+#    plt.ylabel(item)
+#    plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
+#    plt.legend()
+#    plt.grid()
+#    plt.show()
+
+
+#plot_result("total_loss")
+#plot_result("illumination_smoothness_loss")
+#plot_result("spatial_constancy_loss")
+#plot_result("color_constancy_loss")
+#plot_result("exposure_loss")
+
+"""
+## Inference
+"""
+
+
+#def plot_results(images, titles, figure_size=(12, 12)):
+#    fig = plt.figure(figsize=figure_size)
+#    for i in range(len(images)):
+#        fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
+#        _ = plt.imshow(images[i])
+#        plt.axis("off")
+#    plt.show()
+
+
+#def infer(original_image):
+#    image = keras.preprocessing.image.img_to_array(original_image)
+#    image = image.astype("float32") / 255.0
+#    image = np.expand_dims(image, axis=0)
+#    output_image = zero_dce_model(image)
+#    output_image = tf.cast((output_image[0, :, :, :] * 255), dtype=np.uint8)
+#    output_image = Image.fromarray(output_image.numpy())
+#    return output_image
+
+
+"""
+### Inference on test images
+
+We compare the test images from LOLDataset enhanced by Zero-DCE with images enhanced via
+the `PIL.ImageOps.autocontrast()` function.
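+
+Since the plotting and inference helpers in this example are commented out for the NPU
+training job, the snippet below is a minimal sketch of standalone inference, reusing the
+`ZeroDCE` class defined above and assuming training has finished and the weights were
+saved to the `./ckpt/checkpoint` path used in the Training section; `low.png` is a
+hypothetical input file.
+
+```python
+import numpy as np
+import tensorflow as tf
+from PIL import Image
+from tensorflow import keras
+
+model = ZeroDCE()
+model.load_weights('./ckpt/checkpoint')  # checkpoint written by save_weights above
+
+# Preprocess exactly as in the commented-out infer() helper: float32, [0, 1], NHWC.
+image = keras.preprocessing.image.img_to_array(Image.open('low.png'))
+image = np.expand_dims(image.astype('float32') / 255.0, axis=0)
+
+enhanced = model(image)  # DCE-Net forward pass plus 8 LE-curve iterations
+Image.fromarray(tf.cast(enhanced[0] * 255, tf.uint8).numpy()).save('enhanced.png')
+```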
+""" + +#for val_image_file in test_low_light_images: +# original_image = Image.open(val_image_file) +# enhanced_image = infer(original_image) +# plot_results( +# [original_image, ImageOps.autocontrast(original_image), enhanced_image], +# ["Original", "PIL Autocontrast", "Enhanced"], +# (20, 12), +# ) -- Gitee From 4c53de40bdfd90608d4529ba4dc9e7fea6ca23a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:17:15 +0000 Subject: [PATCH 05/54] =?UTF-8?q?TF2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cutmix_ID2502_for_TensorFlow2.X/LICENSE | 284 ++++++++++ .../cutmix_ID2502_for_TensorFlow2.X/README.md | 488 ++++++++++++++++++ .../requirements.txt | 4 + .../test/train_full_1p.sh | 122 +++++ .../test/train_performance_1p_dynamic_eval.sh | 128 +++++ .../test/train_performance_1p_static_eval.sh | 122 +++++ .../cutmix_ID2502_for_TensorFlow2.X/train.py | 486 +++++++++++++++++ 7 files changed, 1634 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..e30ebbbb7 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/README.md @@ -0,0 +1,488 @@ +# CutMix data augmentation for image classification + +**Author:** [Sayan Nath](https://twitter.com/sayannath2350)
+**Date created:** 2021/06/08
+**Last modified:** 2021/06/08
+**Description:** Data augmentation with CutMix for image classification on CIFAR-10.
+
+
+ [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/cutmix.ipynb)  [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/cutmix.py)
+
+
+
+---
+## Introduction
+
+_CutMix_ is a data augmentation technique that addresses the issue of information loss
+and inefficiency present in regional dropout strategies.
+Instead of removing pixels and filling them with black or grey pixels or Gaussian noise,
+you replace the removed regions with a patch from another image,
+while the ground truth labels are mixed proportionally to the number of pixels of combined images.
+CutMix was proposed in
+[CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features](https://arxiv.org/pdf/1905.04899.pdf)
+(Yun et al., 2019).
+
+It's implemented via the following formulas:
+
+$$\tilde{x} = \mathbf{M} \odot x_A + (\mathbf{1} - \mathbf{M}) \odot x_B$$
+$$\tilde{y} = \lambda y_A + (1 - \lambda) y_B$$
+
+where `M` is the binary mask which indicates the cutout and the fill-in
+regions from the two randomly drawn images and `λ` (in `[0, 1]`) is drawn from a
+[`Beta(α, α)` distribution](https://en.wikipedia.org/wiki/Beta_distribution).
+
+The coordinates of the bounding box
+
+$$\mathbf{B} = (r_x, r_y, r_w, r_h)$$
+
+indicate the cutout and fill-in regions of the two images.
+The bounding box sampling is represented by:
+
+$$r_x \sim \mathrm{Unif}(0, W), \quad r_w = W\sqrt{1 - \lambda}$$
+$$r_y \sim \mathrm{Unif}(0, H), \quad r_h = H\sqrt{1 - \lambda}$$
+
+where `rx, ry` are randomly drawn from uniform distributions with upper bounds `W` and
+`H`, the image width and height.
+
+---
+## Setup
+
+
+```python
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from tensorflow import keras
+
+np.random.seed(42)
+tf.random.set_seed(42)
+```
+
+---
+## Load the CIFAR-10 dataset
+
+In this example, we will use the
+[CIFAR-10 image classification dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
+
+
+```python
+(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
+y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
+y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
+
+print(x_train.shape)
+print(y_train.shape)
+print(x_test.shape)
+print(y_test.shape)
+
+class_names = [
+    "Airplane",
+    "Automobile",
+    "Bird",
+    "Cat",
+    "Deer",
+    "Dog",
+    "Frog",
+    "Horse",
+    "Ship",
+    "Truck",
+]
+```
+
+<div class="k-default-codeblock">
+``` +(50000, 32, 32, 3) +(50000, 10) +(10000, 32, 32, 3) +(10000, 10) +``` +
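+
+As a quick numeric check of the label-mixing formulas from the introduction, the
+following minimal standalone sketch (not part of the pipeline below; `lam` and the
+class indices are illustrative) reproduces the bounding-box and adjusted-`λ`
+arithmetic for a 32×32 image:
+
+```python
+import numpy as np
+
+W = H = 32  # CIFAR-10 image size
+lam = 0.7   # a draw from the Beta(0.25, 0.25) distribution used later
+
+# Patch size: r_w = W * sqrt(1 - λ), r_h = H * sqrt(1 - λ)
+r_w = int(W * np.sqrt(1 - lam))  # -> 17
+r_h = int(H * np.sqrt(1 - lam))  # -> 17
+
+# λ is recomputed from the actual patch area so that the label
+# weights match the true pixel proportions:
+lam_adjusted = 1 - (r_w * r_h) / (W * H)  # -> ~0.718
+
+y1, y2 = np.eye(10)[3], np.eye(10)[5]  # one-hot "Cat" and "Dog"
+mixed_label = lam_adjusted * y1 + (1 - lam_adjusted) * y2
+print(r_w, r_h, round(lam_adjusted, 3))  # 17 17 0.718
+```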
+ + +--- +## Define hyperparameters + + +```python +AUTO = tf.data.AUTOTUNE +BATCH_SIZE = 32 +IMG_SIZE = 32 +``` + +--- +## Define the image preprocessing function + + +```python + +def preprocess_image(image, label): + image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE)) + image = tf.image.convert_image_dtype(image, tf.float32) / 255.0 + return image, label + +``` + +--- +## Convert the data into TensorFlow `Dataset` objects + + +```python +train_ds_one = ( + tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .shuffle(1024) + .map(preprocess_image, num_parallel_calls=AUTO) +) +train_ds_two = ( + tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .shuffle(1024) + .map(preprocess_image, num_parallel_calls=AUTO) +) + +train_ds_simple = tf.data.Dataset.from_tensor_slices((x_train, y_train)) + +test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)) + +train_ds_simple = ( + train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) +) + +# Combine two shuffled datasets from the same training data. +train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two)) + +test_ds = ( + test_ds.map(preprocess_image, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) +) +``` + +--- +## Define the CutMix data augmentation function + +The CutMix function takes two `image` and `label` pairs to perform the augmentation. It samples `λ(l)` from the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution) and returns a bounding box from `get_box` function. We then crop the second image (`image2`) and pad this image in the final padded image at the same location. + + +```python + +def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2): + gamma_1_sample = tf.random.gamma(shape=[size], alpha=concentration_1) + gamma_2_sample = tf.random.gamma(shape=[size], alpha=concentration_0) + return gamma_1_sample / (gamma_1_sample + gamma_2_sample) + + +@tf.function +def get_box(lambda_value): + cut_rat = tf.math.sqrt(1.0 - lambda_value) + + cut_w = IMG_SIZE * cut_rat # rw + cut_w = tf.cast(cut_w, tf.int32) + + cut_h = IMG_SIZE * cut_rat # rh + cut_h = tf.cast(cut_h, tf.int32) + + cut_x = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # rx + cut_y = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # ry + + boundaryx1 = tf.clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE) + boundaryy1 = tf.clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE) + bbx2 = tf.clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE) + bby2 = tf.clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE) + + target_h = bby2 - boundaryy1 + if target_h == 0: + target_h += 1 + + target_w = bbx2 - boundaryx1 + if target_w == 0: + target_w += 1 + + return boundaryx1, boundaryy1, target_h, target_w + + +@tf.function +def cutmix(train_ds_one, train_ds_two): + (image1, label1), (image2, label2) = train_ds_one, train_ds_two + + alpha = [0.25] + beta = [0.25] + + # Get a sample from the Beta distribution + lambda_value = sample_beta_distribution(1, alpha, beta) + + # Define Lambda + lambda_value = lambda_value[0][0] + + # Get the bounding box offsets, heights and widths + boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value) + + # Get a patch from the second image (`image2`) + crop2 = tf.image.crop_to_bounding_box( + image2, boundaryy1, boundaryx1, target_h, target_w + ) + # Pad the `image2` patch (`crop2`) with the same offset + image2 = tf.image.pad_to_bounding_box( + crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE + ) + 
# Get a patch from the first image (`image1`) + crop1 = tf.image.crop_to_bounding_box( + image1, boundaryy1, boundaryx1, target_h, target_w + ) + # Pad the `image1` patch (`crop1`) with the same offset + img1 = tf.image.pad_to_bounding_box( + crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE + ) + + # Modify the first image by subtracting the patch from `image1` + # (before applying the `image2` patch) + image1 = image1 - img1 + # Add the modified `image1` and `image2` together to get the CutMix image + image = image1 + image2 + + # Adjust Lambda in accordance to the pixel ration + lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE) + lambda_value = tf.cast(lambda_value, tf.float32) + + # Combine the labels of both images + label = lambda_value * label1 + (1 - lambda_value) * label2 + return image, label + +``` + +**Note**: we are combining two images to create a single one. + +--- +## Visualize the new dataset after applying the CutMix augmentation + + +```python +# Create the new dataset using our `cutmix` utility +train_ds_cmu = ( + train_ds.shuffle(1024) + .map(cutmix, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) +) + +# Let's preview 9 samples from the dataset +image_batch, label_batch = next(iter(train_ds_cmu)) +plt.figure(figsize=(10, 10)) +for i in range(9): + ax = plt.subplot(3, 3, i + 1) + plt.title(class_names[np.argmax(label_batch[i])]) + plt.imshow(image_batch[i]) + plt.axis("off") +``` + + + +![png](/img/examples/vision/cutmix/cutmix_16_0.png) + + + +--- +## Define a ResNet-20 model + + +```python + +def resnet_layer( + inputs, + num_filters=16, + kernel_size=3, + strides=1, + activation="relu", + batch_normalization=True, + conv_first=True, +): + conv = keras.layers.Conv2D( + num_filters, + kernel_size=kernel_size, + strides=strides, + padding="same", + kernel_initializer="he_normal", + kernel_regularizer=keras.regularizers.l2(1e-4), + ) + x = inputs + if conv_first: + x = conv(x) + if batch_normalization: + x = keras.layers.BatchNormalization()(x) + if activation is not None: + x = keras.layers.Activation(activation)(x) + else: + if batch_normalization: + x = keras.layers.BatchNormalization()(x) + if activation is not None: + x = keras.layers.Activation(activation)(x) + x = conv(x) + return x + + +def resnet_v20(input_shape, depth, num_classes=10): + if (depth - 2) % 6 != 0: + raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])") + # Start model definition. + num_filters = 16 + num_res_blocks = int((depth - 2) / 6) + + inputs = keras.layers.Input(shape=input_shape) + x = resnet_layer(inputs=inputs) + # Instantiate the stack of residual units + for stack in range(3): + for res_block in range(num_res_blocks): + strides = 1 + if stack > 0 and res_block == 0: # first layer but not first stack + strides = 2 # downsample + y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides) + y = resnet_layer(inputs=y, num_filters=num_filters, activation=None) + if stack > 0 and res_block == 0: # first layer but not first stack + # linear projection residual shortcut connection to match + # changed dims + x = resnet_layer( + inputs=x, + num_filters=num_filters, + kernel_size=1, + strides=strides, + activation=None, + batch_normalization=False, + ) + x = keras.layers.add([x, y]) + x = keras.layers.Activation("relu")(x) + num_filters *= 2 + + # Add classifier on top. 
+ # v1 does not use BN after last shortcut connection-ReLU + x = keras.layers.AveragePooling2D(pool_size=8)(x) + y = keras.layers.Flatten()(x) + outputs = keras.layers.Dense( + num_classes, activation="softmax", kernel_initializer="he_normal" + )(y) + + # Instantiate model. + model = keras.models.Model(inputs=inputs, outputs=outputs) + return model + + +def training_model(): + return resnet_v20((32, 32, 3), 20) + + +initial_model = training_model() +initial_model.save_weights("initial_weights.h5") +``` + +--- +## Train the model with the dataset augmented by CutMix + + +```python +model = training_model() +model.load_weights("initial_weights.h5") + +model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) +model.fit(train_ds_cmu, validation_data=test_ds, epochs=15) + +test_loss, test_accuracy = model.evaluate(test_ds) +print("Test accuracy: {:.2f}%".format(test_accuracy * 100)) +``` + +
+``` +Epoch 1/15 +1563/1563 [==============================] - 62s 24ms/step - loss: 1.9216 - accuracy: 0.4090 - val_loss: 1.9737 - val_accuracy: 0.4061 +Epoch 2/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.6549 - accuracy: 0.5325 - val_loss: 1.5033 - val_accuracy: 0.5061 +Epoch 3/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.5536 - accuracy: 0.5840 - val_loss: 1.2913 - val_accuracy: 0.6112 +Epoch 4/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.4988 - accuracy: 0.6097 - val_loss: 1.0587 - val_accuracy: 0.7033 +Epoch 5/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.4531 - accuracy: 0.6291 - val_loss: 1.0681 - val_accuracy: 0.6841 +Epoch 6/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.4173 - accuracy: 0.6464 - val_loss: 1.0265 - val_accuracy: 0.7085 +Epoch 7/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.3932 - accuracy: 0.6572 - val_loss: 0.9540 - val_accuracy: 0.7331 +Epoch 8/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.3736 - accuracy: 0.6680 - val_loss: 0.9877 - val_accuracy: 0.7240 +Epoch 9/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.3575 - accuracy: 0.6782 - val_loss: 0.8944 - val_accuracy: 0.7570 +Epoch 10/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.3398 - accuracy: 0.6886 - val_loss: 0.8598 - val_accuracy: 0.7649 +Epoch 11/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.3277 - accuracy: 0.6939 - val_loss: 0.9032 - val_accuracy: 0.7603 +Epoch 12/15 +1563/1563 [==============================] - 38s 24ms/step - loss: 1.3131 - accuracy: 0.6964 - val_loss: 0.7934 - val_accuracy: 0.7926 +Epoch 13/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.3050 - accuracy: 0.7029 - val_loss: 0.8737 - val_accuracy: 0.7552 +Epoch 14/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.2987 - accuracy: 0.7099 - val_loss: 0.8409 - val_accuracy: 0.7766 +Epoch 15/15 +1563/1563 [==============================] - 37s 24ms/step - loss: 1.2953 - accuracy: 0.7099 - val_loss: 0.7850 - val_accuracy: 0.8014 +313/313 [==============================] - 3s 9ms/step - loss: 0.7850 - accuracy: 0.8014 +Test accuracy: 80.14% +``` +
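+
+Note that with CutMix the final training accuracy (~71%) stays below the validation
+accuracy (~80%): the training labels are soft mixtures of two classes while
+evaluation uses clean, unmixed images, so the augmentation acts as a regularizer
+rather than something the model can fit exactly.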
+ + +--- +## Train the model using the original non-augmented dataset + + +```python +model = training_model() +model.load_weights("initial_weights.h5") +model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) +model.fit(train_ds_simple, validation_data=test_ds, epochs=15) + +test_loss, test_accuracy = model.evaluate(test_ds) +print("Test accuracy: {:.2f}%".format(test_accuracy * 100)) +``` + +
+``` +Epoch 1/15 +1563/1563 [==============================] - 38s 23ms/step - loss: 1.4864 - accuracy: 0.5173 - val_loss: 1.3694 - val_accuracy: 0.5708 +Epoch 2/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 1.0682 - accuracy: 0.6779 - val_loss: 1.1424 - val_accuracy: 0.6686 +Epoch 3/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.8955 - accuracy: 0.7449 - val_loss: 1.0555 - val_accuracy: 0.7007 +Epoch 4/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.7890 - accuracy: 0.7878 - val_loss: 1.0575 - val_accuracy: 0.7079 +Epoch 5/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.7107 - accuracy: 0.8175 - val_loss: 1.1395 - val_accuracy: 0.7062 +Epoch 6/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.6524 - accuracy: 0.8397 - val_loss: 1.1716 - val_accuracy: 0.7042 +Epoch 7/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.6098 - accuracy: 0.8594 - val_loss: 1.4120 - val_accuracy: 0.6786 +Epoch 8/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.5715 - accuracy: 0.8765 - val_loss: 1.3159 - val_accuracy: 0.7011 +Epoch 9/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.5477 - accuracy: 0.8872 - val_loss: 1.2873 - val_accuracy: 0.7182 +Epoch 10/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.5233 - accuracy: 0.8988 - val_loss: 1.4118 - val_accuracy: 0.6964 +Epoch 11/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.5165 - accuracy: 0.9045 - val_loss: 1.3741 - val_accuracy: 0.7230 +Epoch 12/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.5008 - accuracy: 0.9124 - val_loss: 1.3984 - val_accuracy: 0.7181 +Epoch 13/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.4896 - accuracy: 0.9190 - val_loss: 1.3642 - val_accuracy: 0.7209 +Epoch 14/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.4845 - accuracy: 0.9231 - val_loss: 1.5469 - val_accuracy: 0.6992 +Epoch 15/15 +1563/1563 [==============================] - 36s 23ms/step - loss: 0.4749 - accuracy: 0.9294 - val_loss: 1.4034 - val_accuracy: 0.7362 +313/313 [==============================] - 3s 9ms/step - loss: 1.4034 - accuracy: 0.7362 +Test accuracy: 73.62% +``` +
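+
+Without augmentation the model shows a classic overfitting signature: training
+accuracy climbs to ~93% while validation accuracy plateaus around 74% and the
+validation loss drifts upward after the first few epochs.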
+
+
+---
+## Notes
+
+In this example, we trained our model for 15 epochs.
+In our experiment, the model trained with CutMix achieves a better accuracy on the
+CIFAR-10 test set (80.14% in the run above) than the model trained without the
+augmentation (73.62%), even though its training accuracy is lower: the mixed labels
+act as a strong regularizer. Time per epoch is roughly the same for both runs.
+
+You can experiment further with the CutMix technique by following the
+[original paper](https://arxiv.org/abs/1905.04899).
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..d1e80795e
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,4 @@
+os
+numpy
+pandas
+tensorflow
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..bf267b779
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Set the default log level; no change needed
+export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=32
+# Network name, same as the directory name
+Network="cutmix_ID2502_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=15
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_full_1p.sh"
+   exit 1
+fi
+
+for para in $*
+do
+   if [[ $para == --data_path* ]];then
+      data_path=`echo ${para#*=}`
+   fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+# Parameter rewrites
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Revert the parameter rewrites
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+# Print results; no change needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==2' | awk '{print$5}' | tr -cd "[0-9]"`
+TrainingTime=`awk 'BEGIN{printf "%.3f\n",'${TrainingTime}'/'1000'}'`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+# Print; no change needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$17}'`
+# Print; no change needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no change needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data; no change needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=${TrainingTime}
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' |grep -v loss > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |tr -d '\b\r'| grep -Eo "loss: [0-9]*\.[0-9]*" | awk -F " " '{print $2}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss value of the last iteration; no change needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+# ActualLoss=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$8}'`
+
+# Print key information into ${CaseName}.log; no change needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
new file mode 100644
index 000000000..80fbce60b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Set the default log level; no change needed
+# export ASCEND_GLOBAL_LOG_LEVEL_ETP=3
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=32
+# Network name, same as the directory name
+Network="cutmix_ID2502_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RankSize=1
+# Training epochs (optional)
+train_epochs=1
+# Training steps
+# train_steps=5
+# Learning rate
+# learning_rate=0.0001
+ckpt_path=""
+# Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_performance_1p_dynamic_eval.sh"
+   exit 1
+fi
+
+for para in $*
+do
+   if [[ $para == --data_path* ]];then
+      data_path=`echo ${para#*=}`
+   fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+# Parameter rewrites
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 train.py --data_dir=${data_path}\
+        --epochs=${train_epochs}\
+        --batch_size=${batch_size}\
+        --eval_static=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Revert the parameter rewrites
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+# Print results; no change needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk '{print $5}'|awk -F "ms" '{print $1}'|sed s/[[:space:]]//g`
+wait
+#FPS
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'1000'/'${TrainingTime}'}'`
+# Print; no change needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+train_accuracy=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $11}'|sed s/[[:space:]]//g`
+# Print; no change needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no change needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'dynamic'_'perf'
+
+## Collect performance data; no change needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'`
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep -v "ms/step"|tr -d '\b\r'| grep -Eo " loss: [0-9]*\.[0-9]*"|awk '{print $2}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss value of the last iteration; no change needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+#ModelStatus="graph execution FAIL"
+#DTS_Number="DTS2021090622224"
+#error_msg="type Conv2DBackpropFilter is not found in this op store"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+#error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -1`
+
+# Print key information into ${CaseName}.log; no change needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RankSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
new file mode 100644
index 000000000..997ae82a6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Set the default log level; no change needed
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=32
+# Network name, same as the directory name
+Network="cutmix_ID2502_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=5
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_performance_1p_static_eval.sh"
+   exit 1
+fi
+
+for para in $*
+do
+   if [[ $para == --data_path* ]];then
+      data_path=`echo ${para#*=}`
+   fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+# Parameter rewrites
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Revert the parameter rewrites
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+# Print results; no change needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==2' | awk '{print$5}' | tr -cd "[0-9]"`
+TrainingTime=`awk 'BEGIN{printf "%.3f\n",'${TrainingTime}'/'1000'}'`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+# Print; no change needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$17}'`
+# Print; no change needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no change needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no change needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=${TrainingTime}
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' |grep -v loss > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |tr -d '\b\r'| grep -Eo "loss: [0-9]*\.[0-9]*" | awk -F " " '{print $2}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss value of the last iteration; no change needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+# ActualLoss=`grep s/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$8}'`
+
+# Print key information into ${CaseName}.log; no change needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..629a54eb0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/train.py
@@ -0,0 +1,486 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +""" +Title: CutMix data augmentation for image classification +Author: [Sayan Nath](https://twitter.com/sayannath2350) +Date created: 2021/06/08 +Last modified: 2021/06/08 +Description: Data augmentation with CutMix for image classification on CIFAR-10. +""" + +""" +## Introduction +""" + +""" +_CutMix_ is a data augmentation technique that addresses the issue of information loss +and inefficiency present in regional dropout strategies. +Instead of removing pixels and filling them with black or grey pixels or Gaussian noise, +you replace the removed regions with a patch from another image, +while the ground truth labels are mixed proportionally to the number of pixels of combined images. +CutMix was proposed in +[CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features](https://arxiv.org/pdf/1905.04899.pdf) +(Yun et al., 2019) + +It's implemented via the following formulas: + + + +where `M` is the binary mask which indicates the cutout and the fill-in +regions from the two randomly drawn images and `λ` (in `[0, 1]`) is drawn from a +[`Beta(α, α)` distribution](https://en.wikipedia.org/wiki/Beta_distribution) + +The coordinates of bounding boxes are: + + + +which indicates the cutout and fill-in regions in case of the images. +The bounding box sampling is represented by: + + + +where `rx, ry` are randomly drawn from a uniform distribution with upper bound. +""" + +""" +## Setup +""" +import npu_device +print('npu_device loaded') +npu_device.open().as_default() + +import os +import ast +import numpy as np +import pandas as pd +# import matplotlib.pyplot as plt +import tensorflow as tf +from tensorflow import keras +from tensorflow.python.keras import backend as K +from tensorflow.python.keras.datasets.cifar import load_batch +import argparse +np.random.seed(42) +tf.random.set_seed(42) + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_dir', default="../cifar-10-batches-py/", + help="""directory to data""") + parser.add_argument('--batch_size', default=32, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=15, type=int, + help="""epochs""") + parser.add_argument('--eval_static', dest="eval_static", type=ast.literal_eval, + help='the path to train data') + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + + +args = parse_args() +data_path = args.data_dir +num_epochs = args.epochs + +""" +## Load the CIFAR-10 dataset + +In this example, we will use the +[CIFAR-10 image classification dataset](https://www.cs.toronto.edu/~kriz/cifar.html). 
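+
+Unlike the upstream Keras example, this script reads a local copy of the
+python-version CIFAR-10 batches (`data_batch_1` ... `data_batch_5` and
+`test_batch`) from the directory passed via `--data_dir`, rather than
+downloading the dataset with `tf.keras.datasets.cifar10.load_data()`.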
+""" +def load_data(data_path): + num_train_samples = 50000 + x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') + y_train = np.empty((num_train_samples,), dtype='uint8') + + for i in range(1, 6): + fpath = os.path.join(data_path, 'data_batch_' + str(i)) + (x_train[(i - 1) * 10000:i * 10000, :, :, :], + y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath) + + fpath = os.path.join(data_path, 'test_batch') + x_test, y_test = load_batch(fpath) + + y_train = np.reshape(y_train, (len(y_train), 1)) + y_test = np.reshape(y_test, (len(y_test), 1)) + + if K.image_data_format() == 'channels_last': + x_train = x_train.transpose(0, 2, 3, 1) + x_test = x_test.transpose(0, 2, 3, 1) + + x_test = x_test.astype(x_train.dtype) + y_test = y_test.astype(y_train.dtype) + + return (x_train, y_train), (x_test, y_test) + +(x_train, y_train), (x_test, y_test) = load_data(data_path) + +y_train = tf.keras.utils.to_categorical(y_train, num_classes=10) +y_test = tf.keras.utils.to_categorical(y_test, num_classes=10) + +print(x_train.shape) +print(y_train.shape) +print(x_test.shape) +print(y_test.shape) + +class_names = [ + "Airplane", + "Automobile", + "Bird", + "Cat", + "Deer", + "Dog", + "Frog", + "Horse", + "Ship", + "Truck", +] + +""" +## Define hyperparameters +""" + +AUTO = tf.data.AUTOTUNE +BATCH_SIZE = args.batch_size +IMG_SIZE = 32 + +""" +## Define the image preprocessing function +""" + + +def preprocess_image(image, label): + image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE)) + image = tf.image.convert_image_dtype(image, tf.float32) / 255.0 + return image, label + + +""" +## Convert the data into TensorFlow `Dataset` objects +""" + +train_ds_one = ( + tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .shuffle(1024) + .map(preprocess_image, num_parallel_calls=AUTO) +) +train_ds_two = ( + tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .shuffle(1024) + .map(preprocess_image, num_parallel_calls=AUTO) +) + +train_ds_simple = tf.data.Dataset.from_tensor_slices((x_train, y_train)) + +if args.eval_static: + + test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)) + + train_ds_simple = ( + train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) + + # Combine two shuffled datasets from the same training data. + train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two)) + + test_ds = ( + test_ds.map(preprocess_image, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) +else: + + test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)) + + train_ds_simple = ( + train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + + # Combine two shuffled datasets from the same training data. + train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two)) + + test_ds = ( + test_ds.map(preprocess_image, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + +""" +## Define the CutMix data augmentation function + +The CutMix function takes two `image` and `label` pairs to perform the augmentation. It samples `λ(l)` from the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution) and returns a bounding box from `get_box` function. We then crop the second image (`image2`) and pad this image in the final padded image at the same location. 
+""" + + +def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2): + gamma_1_sample = tf.random.gamma(shape=[size], alpha=concentration_1) + gamma_2_sample = tf.random.gamma(shape=[size], alpha=concentration_0) + return gamma_1_sample / (gamma_1_sample + gamma_2_sample) + + +@tf.function +def get_box(lambda_value): + cut_rat = tf.math.sqrt(1.0 - lambda_value) + + cut_w = IMG_SIZE * cut_rat # rw + cut_w = tf.cast(cut_w, tf.int32) + + cut_h = IMG_SIZE * cut_rat # rh + cut_h = tf.cast(cut_h, tf.int32) + + cut_x = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # rx + cut_y = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # ry + + boundaryx1 = tf.clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE) + boundaryy1 = tf.clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE) + bbx2 = tf.clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE) + bby2 = tf.clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE) + + target_h = bby2 - boundaryy1 + if target_h == 0: + target_h += 1 + + target_w = bbx2 - boundaryx1 + if target_w == 0: + target_w += 1 + + return boundaryx1, boundaryy1, target_h, target_w + + +@tf.function +def cutmix(train_ds_one, train_ds_two): + (image1, label1), (image2, label2) = train_ds_one, train_ds_two + + alpha = [0.25] + beta = [0.25] + + # Get a sample from the Beta distribution + lambda_value = sample_beta_distribution(1, alpha, beta) + + # Define Lambda + lambda_value = lambda_value[0][0] + + # Get the bounding box offsets, heights and widths + boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value) + + # Get a patch from the second image (`image2`) + crop2 = tf.image.crop_to_bounding_box( + image2, boundaryy1, boundaryx1, target_h, target_w + ) + # Pad the `image2` patch (`crop2`) with the same offset + image2 = tf.image.pad_to_bounding_box( + crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE + ) + # Get a patch from the first image (`image1`) + crop1 = tf.image.crop_to_bounding_box( + image1, boundaryy1, boundaryx1, target_h, target_w + ) + # Pad the `image1` patch (`crop1`) with the same offset + img1 = tf.image.pad_to_bounding_box( + crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE + ) + + # Modify the first image by subtracting the patch from `image1` + # (before applying the `image2` patch) + image1 = image1 - img1 + # Add the modified `image1` and `image2` together to get the CutMix image + image = image1 + image2 + + # Adjust Lambda in accordance to the pixel ration + lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE) + lambda_value = tf.cast(lambda_value, tf.float32) + + # Combine the labels of both images + label = lambda_value * label1 + (1 - lambda_value) * label2 + return image, label + + +""" +**Note**: we are combining two images to create a single one. 
+ +## Visualize the new dataset after applying the CutMix augmentation +""" + +# Create the new dataset using our `cutmix` utility +if args.eval_static: + train_ds_cmu = ( + train_ds.shuffle(1024) + .map(cutmix, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) +else: + train_ds_cmu = ( + train_ds.shuffle(1024) + .map(cutmix, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + +# Let's preview 9 samples from the dataset +# image_batch, label_batch = next(iter(train_ds_cmu)) +# plt.figure(figsize=(10, 10)) +# for i in range(9): +# ax = plt.subplot(3, 3, i + 1) +# plt.title(class_names[np.argmax(label_batch[i])]) +# plt.imshow(image_batch[i]) +# plt.axis("off") + +""" +## Define a ResNet-20 model +""" + + +def resnet_layer( + inputs, + num_filters=16, + kernel_size=3, + strides=1, + activation="relu", + batch_normalization=True, + conv_first=True, +): + conv = keras.layers.Conv2D( + num_filters, + kernel_size=kernel_size, + strides=strides, + padding="same", + kernel_initializer="he_normal", + kernel_regularizer=keras.regularizers.l2(1e-4), + ) + x = inputs + if conv_first: + x = conv(x) + if batch_normalization: + x = keras.layers.BatchNormalization()(x) + if activation is not None: + x = keras.layers.Activation(activation)(x) + else: + if batch_normalization: + x = keras.layers.BatchNormalization()(x) + if activation is not None: + x = keras.layers.Activation(activation)(x) + x = conv(x) + return x + + +def resnet_v20(input_shape, depth, num_classes=10): + if (depth - 2) % 6 != 0: + raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])") + # Start model definition. + num_filters = 16 + num_res_blocks = int((depth - 2) / 6) + + inputs = keras.layers.Input(shape=input_shape) + x = resnet_layer(inputs=inputs) + # Instantiate the stack of residual units + for stack in range(3): + for res_block in range(num_res_blocks): + strides = 1 + if stack > 0 and res_block == 0: # first layer but not first stack + strides = 2 # downsample + y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides) + y = resnet_layer(inputs=y, num_filters=num_filters, activation=None) + if stack > 0 and res_block == 0: # first layer but not first stack + # linear projection residual shortcut connection to match + # changed dims + x = resnet_layer( + inputs=x, + num_filters=num_filters, + kernel_size=1, + strides=strides, + activation=None, + batch_normalization=False, + ) + x = keras.layers.add([x, y]) + x = keras.layers.Activation("relu")(x) + num_filters *= 2 + + # Add classifier on top. + # v1 does not use BN after last shortcut connection-ReLU + x = keras.layers.AveragePooling2D(pool_size=8)(x) + y = keras.layers.Flatten()(x) + outputs = keras.layers.Dense( + num_classes, activation="softmax", kernel_initializer="he_normal" + )(y) + + # Instantiate model. 
+ model = keras.models.Model(inputs=inputs, outputs=outputs) + return model + + +def training_model(): + return resnet_v20((32, 32, 3), 20) + + +initial_model = training_model() +initial_model.save_weights("initial_weights.h5") + +""" +## Train the model with the dataset augmented by CutMix +""" + +model = training_model() +model.load_weights("initial_weights.h5") + +model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) +model.fit(train_ds_cmu, validation_data=test_ds, epochs=num_epochs) + + +""" +## Train the model using the original non-augmented dataset +""" + +# model = training_model() +# model.load_weights("initial_weights.h5") +# model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) +# model.fit(train_ds_simple, validation_data=test_ds, epochs=15) + +# test_loss, test_accuracy = model.evaluate(test_ds) +# print("Test accuracy: {:.2f}%".format(test_accuracy * 100)) + +""" +## Notes + +In this example, we trained our model for 15 epochs. +In our experiment, the model with CutMix achieves a better accuracy on the CIFAR-10 dataset +(80.36% in our experiment) compared to the model that doesn't use the augmentation (72.70%). +You may notice it takes less time to train the model with the CutMix augmentation. + +You can experiment further with the CutMix technique by following the +[original paper](https://arxiv.org/abs/1905.04899). +""" \ No newline at end of file -- Gitee From b96b7cf192c494592fb89f21693c52b54cbb3319 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:20:52 +0000 Subject: [PATCH 06/54] add TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt. 
--- .../cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt diff --git a/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..0b49b4fb2 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/cutmix_ID2502_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:OK +PrecisionStatus:OK \ No newline at end of file -- Gitee From 84b7186558e592444ff02a79941958964d13dafb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:25:13 +0000 Subject: [PATCH 07/54] =?UTF-8?q?pointnet=5FID2531=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../pointnet_ID2531_for_TensorFlow2.X/LICENSE | 284 ++++++++++++++ .../ReadME.md | 29 ++ .../pointnet.py | 355 ++++++++++++++++++ .../requirements.txt | 1 + .../test/train_full_1p.sh | 103 +++++ .../test/train_performance_1p_dynamic_eval.sh | 115 ++++++ .../test/train_performance_1p_static_eval.sh | 104 +++++ 7 files changed, 991 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md new file mode 100644 index 000000000..d844898a8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/ReadME.md @@ -0,0 +1,29 @@ +# 一、基础信息 + +网络名称:`pointnet_ID2531_for_TensorFlow2.X` + +github addr:https://github.com/keras-team/keras-io/tree/master/examples/vision + +# 二、代码修改 + +# 三、程序运行 + +```shell +bash run_1p.sh +``` + +# 四、归档文件路径 + +1、数据集 +pointnet_ID2531_for_TensorFlow2.X,10.248.93.131:Huawei@123,/train_output/turingDataset/00-CV/ID2531_CarPeting_TF2.X_pointnet:2292148 + + +2、归档文件 + +3、迁移代码 + +4、源代码 + +5、源迁移代码 + +# 五、NPU工作环境 \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py new file mode 100644 index 000000000..790f4d52b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/pointnet.py @@ -0,0 +1,355 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Title: Point cloud classification with PointNet +Author: [David Griffiths](https://dgriffiths3.github.io) +Date created: 2020/05/25 +Last modified: 2020/05/26 +Description: Implementation of PointNet for ModelNet10 classification. 
+""" +""" +# Point cloud classification +""" + +""" +## Introduction + +Classification, detection and segmentation of unordered 3D point sets i.e. point clouds +is a core problem in computer vision. This example implements the seminal point cloud +deep learning paper [PointNet (Qi et al., 2017)](https://arxiv.org/abs/1612.00593). For a +detailed intoduction on PointNet see [this blog +post](https://medium.com/@luis_gonzales/an-in-depth-look-at-pointnet-111d7efdaa1a). +""" + +""" +## Setup + +If using colab first install trimesh with `!pip install trimesh`. +""" + + +import os +import argparse +import ast +import glob +import trimesh +import numpy as np +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers +from matplotlib import pyplot as plt + +import npu_device +npu_device.open().as_default() + +tf.random.set_seed(1234) + +''' +""" +## Load dataset + +We use the ModelNet10 model dataset, the smaller 10 class version of the ModelNet40 +dataset. First download the data: +""" + +DATA_DIR = tf.keras.utils.get_file( + "modelnet.zip", + "http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip", + extract=True, +) +DATA_DIR = os.path.join(os.path.dirname(DATA_DIR), "ModelNet10") + +""" +We can use the `trimesh` package to read and visualize the `.off` mesh files. +""" + +mesh = trimesh.load(os.path.join(DATA_DIR, "chair/train/chair_0001.off")) +mesh.show() + +""" +To convert a mesh file to a point cloud we first need to sample points on the mesh +surface. `.sample()` performs a unifrom random sampling. Here we sample at 2048 locations +and visualize in `matplotlib`. +""" + +points = mesh.sample(2048) + +fig = plt.figure(figsize=(5, 5)) +ax = fig.add_subplot(111, projection="3d") +ax.scatter(points[:, 0], points[:, 1], points[:, 2]) +ax.set_axis_off() +plt.show() +''' + + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default='./', + help="""directory to data""") + parser.add_argument('--batch_size', default=32, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=20, type=int, + help="""epochs""") + parser.add_argument('--drop_remainder', default="True", type=ast.literal_eval, + help="""drop_remainder True or False remote dynamic or static input""") + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + + +""" +Set the number of points to sample and batch size and parse the dataset. This can take +~5minutes to complete. +""" + +args = parse_args() +DATA_DIR = os.path.join(args.data_path, "ModelNet10/") +NUM_POINTS = 2048 +NUM_CLASSES = 10 +BATCH_SIZE = args.batch_size +EPOCHS=args.epochs + + +""" +To generate a `tf.data.Dataset()` we need to first parse through the ModelNet data +folders. Each mesh is loaded and sampled into a point cloud before being added to a +standard python list and converted to a `numpy` array. We also store the current +enumerate index value as the object label and use a dictionary to recall this later. 
+""" + + +def parse_dataset(num_points=2048): + + train_points = [] + train_labels = [] + test_points = [] + test_labels = [] + class_map = {} + folders = glob.glob(os.path.join(DATA_DIR, "[!README]*")) + + for i, folder in enumerate(folders): + print("processing class: {}".format(os.path.basename(folder))) + # store folder name with ID so we can retrieve later + class_map[i] = folder.split("/")[-1] + # gather all files + train_files = glob.glob(os.path.join(folder, "train/*")) + test_files = glob.glob(os.path.join(folder, "test/*")) + + for f in train_files: + train_points.append(trimesh.load(f).sample(num_points)) + train_labels.append(i) + + for f in test_files: + test_points.append(trimesh.load(f).sample(num_points)) + test_labels.append(i) + + return ( + np.array(train_points), + np.array(test_points), + np.array(train_labels), + np.array(test_labels), + class_map, + ) + +train_points, test_points, train_labels, test_labels, CLASS_MAP = parse_dataset( + NUM_POINTS +) + +""" +Our data can now be read into a `tf.data.Dataset()` object. We set the shuffle buffer +size to the entire size of the dataset as prior to this the data is ordered by class. +Data augmentation is important when working with point cloud data. We create a +augmentation function to jitter and shuffle the train dataset. +""" + + +def augment(points, label): + # jitter points + points += tf.random.uniform(points.shape, -0.005, 0.005, dtype=tf.float64) + # shuffle points + points = tf.random.shuffle(points) + return points, label + + +train_dataset = tf.data.Dataset.from_tensor_slices((train_points, train_labels)) +test_dataset = tf.data.Dataset.from_tensor_slices((test_points, test_labels)) + +train_dataset = train_dataset.shuffle(len(train_points)).map(augment).batch(BATCH_SIZE, drop_remainder=args.drop_remainder) +test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE, drop_remainder=args.drop_remainder) + +""" +### Build a model + +Each convolution and fully-connected layer (with exception for end layers) consits of +Convolution / Dense -> Batch Normalization -> ReLU Activation. +""" + + +def conv_bn(x, filters): + x = layers.Conv1D(filters, kernel_size=1, padding="valid")(x) + x = layers.BatchNormalization(momentum=0.0)(x) + return layers.Activation("relu")(x) + + +def dense_bn(x, filters): + x = layers.Dense(filters)(x) + x = layers.BatchNormalization(momentum=0.0)(x) + return layers.Activation("relu")(x) + + +""" +PointNet consists of two core components. The primary MLP network, and the transformer +net (T-net). The T-net aims to learn an affine transformation matrix by its own mini +network. The T-net is used twice. The first time to transform the input features (n, 3) +into a canonical representation. The second is an affine transformation for alignment in +feature space (n, 3). As per the original paper we constrain the transformation to be +close to an orthogonal matrix (i.e. ||X*X^T - I|| = 0). +""" + + +class OrthogonalRegularizer(keras.regularizers.Regularizer): + def __init__(self, num_features, l2reg=0.001): + self.num_features = num_features + self.l2reg = l2reg + self.eye = tf.eye(num_features) + + def __call__(self, x): + x = tf.reshape(x, (-1, self.num_features, self.num_features)) + xxt = tf.tensordot(x, x, axes=(2, 2)) + xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features)) + return tf.reduce_sum(self.l2reg * tf.square(xxt - self.eye)) + + +""" + We can then define a general function to build T-net layers. 
+""" + + +def tnet(inputs, num_features): + + # Initalise bias as the indentity matrix + bias = keras.initializers.Constant(np.eye(num_features).flatten()) + reg = OrthogonalRegularizer(num_features) + + x = conv_bn(inputs, 32) + x = conv_bn(x, 64) + x = conv_bn(x, 512) + x = layers.GlobalMaxPooling1D()(x) + x = dense_bn(x, 256) + x = dense_bn(x, 128) + x = layers.Dense( + num_features * num_features, + kernel_initializer="zeros", + bias_initializer=bias, + activity_regularizer=reg, + )(x) + feat_T = layers.Reshape((num_features, num_features))(x) + # Apply affine transformation to input features + return layers.Dot(axes=(2, 1))([inputs, feat_T]) + + +""" +The main network can be then implemented in the same manner where the t-net mini models +can be dropped in a layers in the graph. Here we replicate the network architecture +published in the original paper but with half the number of weights at each layer as we +are using the smaller 10 class ModelNet dataset. +""" + +inputs = keras.Input(shape=(NUM_POINTS, 3)) + +x = tnet(inputs, 3) +x = conv_bn(x, 32) +x = conv_bn(x, 32) +x = tnet(x, 32) +x = conv_bn(x, 32) +x = conv_bn(x, 64) +x = conv_bn(x, 512) +x = layers.GlobalMaxPooling1D()(x) +x = dense_bn(x, 256) +x = layers.Dropout(0.3)(x) +x = dense_bn(x, 128) +x = layers.Dropout(0.3)(x) + +outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x) + +model = keras.Model(inputs=inputs, outputs=outputs, name="pointnet") +model.summary() + +""" +### Train model + +Once the model is defined it can be trained like any other standard classification model +using `.compile()` and `.fit()`. +""" + +model.compile( + loss="sparse_categorical_crossentropy", + optimizer=keras.optimizers.Adam(learning_rate=0.001), + metrics=["sparse_categorical_accuracy"], +) + +model.fit(train_dataset, epochs=EPOCHS, validation_data=test_dataset) +model.save_weights(filepath="pointnet", save_format="tf") +""" +## Visualize predictions + +We can use matplotlib to visualize our trained model performance. +""" + +''' +data = test_dataset.take(1) + +points, labels = list(data)[0] +points = points[:8, ...] +labels = labels[:8, ...] 
+ +# run test data through model +preds = model.predict(points) +preds = tf.math.argmax(preds, -1) + +points = points.numpy() + +# plot points with predicted class and label +fig = plt.figure(figsize=(15, 10)) +for i in range(8): + ax = fig.add_subplot(2, 4, i + 1, projection="3d") + ax.scatter(points[i, :, 0], points[i, :, 1], points[i, :, 2]) + ax.set_title( + "pred: {:}, label: {:}".format( + CLASS_MAP[preds[i].numpy()], CLASS_MAP[labels.numpy()[i]] + ) + ) + ax.set_axis_off() +plt.show() +''' diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..ac1db3b60 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/requirements.txt @@ -0,0 +1 @@ +trimesh \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..9d57d0954 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,103 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="pointnet_ID2531_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=20 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 pointnet.py --data_path=$data_path \ + --epoch=$train_epochs \ + --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==1' | awk -F " " '{print$5}' | tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_sparse_categorical_accuracy:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $17}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END 
{print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh new file mode 100644 index 000000000..2c68e2f74 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh @@ -0,0 +1,115 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="pointnet_ID2531_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 pointnet.py --data_path=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --drop_remainder=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep 1875/1875 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep sparse_categorical_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + 
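+
+# Note: the FPS formula above multiplies a hard-coded 1875 steps/epoch by the
+# batch size; that constant appears to be carried over from a 60000-sample
+# dataset and does not match ModelNet10, so treat the reported FPS as relative.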
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep student_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +ModelStatus="图执行FAIL" +DTS_Number="DTS2021090622224" +# error_msg="E19999" +error_msg="EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel \[AIcoreEngine\] is unsupported" +Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +# error_msg="Graph engine process graph failed: E19999: Inner Error! Output shape is still unknown after shape inference. shape = [-1]." + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh new file mode 100644 index 000000000..6b839114f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh @@ -0,0 +1,104 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="pointnet_ID2531_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 pointnet.py --data_path=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --drop_remainder=True > 
$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ms/step $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==1' | awk -F " " '{print$5}' | tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_sparse_categorical_accuracy:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $17}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From 83b0d632d6ed79e5388112763159842af1e30484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:26:34 +0000 Subject: [PATCH 08/54] add TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt. 
--- .../pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt diff --git a/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..0b49b4fb2 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_ID2531_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:OK +PrecisionStatus:OK \ No newline at end of file -- Gitee From 50df09ed3ecf9c860ddb64d8e735cf33e5d2da68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:28:24 +0000 Subject: [PATCH 09/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/.gitignore | 2 + TensorFlow2/built-in/keras_sample/LICENSE | 51 ++ TensorFlow2/built-in/keras_sample/README.md | 233 +++++++++ .../built-in/keras_sample/README_BAK.md | 77 +++ TensorFlow2/built-in/keras_sample/evaluate.py | 199 ++++++++ .../built-in/keras_sample/modelzoo_level.txt | 3 + TensorFlow2/built-in/keras_sample/provider.py | 165 +++++++ .../built-in/keras_sample/requirements.txt | 0 TensorFlow2/built-in/keras_sample/train.py | 452 ++++++++++++++++++ .../built-in/keras_sample/train_real.py | 381 +++++++++++++++ 10 files changed, 1563 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/.gitignore create mode 100644 TensorFlow2/built-in/keras_sample/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/README.md create mode 100644 TensorFlow2/built-in/keras_sample/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/evaluate.py create mode 100644 TensorFlow2/built-in/keras_sample/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/provider.py create mode 100644 TensorFlow2/built-in/keras_sample/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/train.py create mode 100644 TensorFlow2/built-in/keras_sample/train_real.py diff --git a/TensorFlow2/built-in/keras_sample/.gitignore b/TensorFlow2/built-in/keras_sample/.gitignore new file mode 100644 index 000000000..8efb80c9a --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/.gitignore @@ -0,0 +1,2 @@ +/data/* +/log/* diff --git a/TensorFlow2/built-in/keras_sample/LICENSE b/TensorFlow2/built-in/keras_sample/LICENSE new file mode 100644 index 000000000..e93be0a6b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/LICENSE @@ -0,0 +1,51 @@ +PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation. + +Copyright (c) 2017, Geometric Computation Group of Stanford University + +The MIT License (MIT) + +Copyright (c) 2017 Charles R. 
Qi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/README.md b/TensorFlow2/built-in/keras_sample/README.md
new file mode 100644
index 000000000..2e27ca0f6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/README.md
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+

+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Instance Segmentation**
+
+**Version: 1.1**
+
+**Modified: 2022.04.11**
+
+**Size: 43M**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code, based on the TensorFlow 2.X framework, for a classification and segmentation network on sampled 3D point clouds**
+
+

+<h2 id="概述.md">Overview</h2>
+
+## Summary
+
+A point cloud is a very important geometric data structure. Because of its irregular format, most researchers convert point clouds into regular 3D voxel grids or into collections of 2D images taken from different viewpoints. Converting the data this way inflates its size and introduces a series of problems. PointNet is a neural network that consumes point clouds directly and accounts for the permutation invariance of the input points. It provides a unified architecture for classification, part segmentation, and semantic parsing. Although the network is simple, it is very effective: experimentally it outperforms, or at least matches, classical methods. The paper also analyses what the network learns and why it stays stable when the input is perturbed to a certain degree.
+
+
+  - Reference paper:
+
+    https://arxiv.org/abs/1612.00593(https://arxiv.org/abs/1612.00593)
+
+  - Reference implementation:
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py(https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py)
+
+
+  - Implementation adapted to the Ascend AI processor:
+    skip
+
+  - To fetch the code at a given commit_id via Git:
+    ```
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's code directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # enter the model code path; skip if the repository contains only this model
+    ```
+
+
+
+
+## Default configuration
+
+
+- Network structure
+  - A max pooling layer (a symmetric function) aggregates feature information from all points
+  - After the global point cloud feature vector is computed, the global feature is concatenated back onto each per-point feature; new per-point features are then extracted from the combined features, so that every per-point feature is aware of both local and global information
+  - A small network (T-net) predicts an affine transformation matrix that is applied directly to the coordinates of the input points. The small network resembles the main one and is built from the same basic modules: point-independent feature extraction, max pooling and fully connected layers.
+
+- Training hyperparameters (single card):
+  - Batch size: 32
+  - learning_rate: 0.0015
+  - num_point: 2048
+  - Train epoch: 250
+
+
+## Supported features
+
+| Feature | Supported |
+|-------|------|
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+## Mixed precision training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization policy, selected float32 operators in the graph are automatically lowered to float16, which improves system performance and reduces memory usage with very little loss of accuracy.
+
+## Enabling mixed precision
+Related sample code:
+
+```
+    config_proto = tf.ConfigProto(allow_soft_placement=True)
+    custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+    custom_op.name = 'NpuOptimizer'
+    custom_op.parameter_map["use_off_line"].b = True
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+    config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+    session_config = npu_config_proto(config_proto=config_proto)
+```
+
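+
+For the TF2 samples in this directory that call `npu_device` directly (as pointnet.py does), a minimal sketch of the equivalent setup is shown below; note that `npu_device.global_options()` is an assumption about the installed NPU plugin rather than something shown in this patch:
+
+```
+import npu_device
+
+# Assumed API: request mixed precision before opening the NPU device.
+npu_device.global_options().precision_mode = "allow_mix_precision"
+npu_device.open().as_default()
+```
+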

+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware and runtime environment preparation, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+

+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Training uses the modelnet40_ply_hdf5_2048 dataset, i.e. point cloud data produced from the ModelNet40 models (HDF5 files). Each point cloud contains 2048 points uniformly sampled from the shape surface; each cloud is zero-mean and normalized into a unit sphere.
+2. Install h5py. The code has been tested on Ubuntu 14.04 with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1.
+```
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+3. By default, log files and network parameters are saved to the `log` folder. The HDF5 point clouds of the ModelNet40 models are downloaded automatically (416MB) into the data folder.
+
+## Model training
+- Click "Download now" and choose a suitable way to download the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables related to program execution.
+
+       For the environment variable configuration, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+    2. Single-card training
+
+       2.1 Set the single-card training parameters (the script is PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh), for example:
+
+
+        ```
+        batch_size=32
+        # training epochs
+        train_epochs=250
+        # learning rate
+        learning_rate=0.0015
+        ```
+
+
+
+       2.2 Single-card training command (PointNet_ID2913_for_TensorFlow2.X/test)
+
+        ```
+        Run export ASCEND_DEVICE_ID=0 (0-7) in the terminal to select the card used for single-card training
+        bash train_full_1p.sh --data_path=xx
+        The dataset should be HDF5 files; data_path must point at the data directory, e.g. --data_path=/home/data
+        ├─data
+          ├─ply_data_test0.h5*
+          ├─ply_data_test_0_id2file.json*
+          ├─ply_data_test1.h5*
+          ├─ply_data_test_1_id2file.json*
+          ├─ply_data_train0.h5*
+          ├─ply_data_train_0_id2file.json*
+          ├─ply_data_train1.h5*
+          ├─ply_data_train_1_id2file.json*
+          ├─ply_data_train2.h5*
+          ├─ply_data_train_2_id2file.json*
+          ├─ply_data_train3.h5*
+          ├─ply_data_train_3_id2file.json*
+          ├─ply_data_train4.h5*
+          ├─ply_data_train_4_id2file.json*
+          ├─shape_names.txt*
+          ├─test_files.txt*
+          ├─train_files.txt*
+
+        ```
+
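+
+As a quick sanity check of the prepared dataset, one HDF5 shard can be inspected with h5py (a sketch; the `data`/`label` key names are assumed from the standard modelnet40_ply_hdf5_2048 packaging):
+
+```
+import h5py
+
+with h5py.File("/home/data/ply_data_train0.h5", "r") as f:
+    points = f["data"][:]   # (N, 2048, 3) float32 point clouds
+    labels = f["label"][:]  # (N, 1) integer class ids
+print(points.shape, labels.shape)
+```
+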

+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+  1. Obtain the data.
+     See "Dataset preparation" under "Quick Start".
+
+- Model training
+
+  See the "Quick Start" section.
+

+<h2 id="高级参考.md">Advanced Reference</h2>

+## Scripts and sample code
+
+    ├── README.md                                 // documentation
+    ├── requirements.txt                          // dependencies
+    ├── modelzoo_level.txt                        // status file
+    ├── provider.py                               // dataset processing script
+    ├── train.py                                  // network training script
+    ├── models                                    // network definition scripts
+    |—— pointnet_cls.py
+    |—— pointnet_cls_basic.py
+    |—— pointnet_seg.py
+    |—— transform_nets.py
+    ├── test
+    |   |—— train_full_1p.sh                      // single-card training script
+    |   |—— train_performance_1p.sh               // single-card training script
+    ...
+
+## Script parameters
+
+```
+batch_size          training batch size
+learning_rate       initial learning rate
+max_epochs          maximum number of training epochs
+num_point           number of points uniformly sampled from each shape surface per point cloud
+precision_mode      default="allow_mix_precision", type=str,help='the path to save over dump data'
+over_dump           type=ast.literal_eval,help='if or not over detection, default is False'
+data_dump_flag      type=ast.literal_eval,help='data dump flag, default is False'
+data_dump_step      data dump step, default is 10
+profiling           type=ast.literal_eval help='if or not profiling for performance debug, default is False'
+profiling_dump_path type=str, help='the path to save profiling data'
+over_dump_path      type=str, help='the path to save over dump data'
+data_dump_path      type=str, help='the path to save dump data'
+use_mixlist         type=ast.literal_eval,help='use_mixlist flag, default is False'
+fusion_off_flag     type=ast.literal_eval,help='fusion_off flag, default is False'
+mixlist_file        type=str,help='mixlist file name, default is ops_info.json'
+fusion_off_file     type=str,help='fusion_off file name, default is fusion_switch.cfg'
+auto_tune           help='auto_tune flag, default is False'
+```
+
+## Training procedure
+
+Launch single-card training with the commands described in "Model training".
+Set data_path in the training script (train_full_1p.sh) to the path of the training dataset. For the detailed procedure, see the example under "Model training".
diff --git a/TensorFlow2/built-in/keras_sample/README_BAK.md b/TensorFlow2/built-in/keras_sample/README_BAK.md
new file mode 100644
index 000000000..6d7185b09
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/README_BAK.md
+## PointNet: *Deep Learning on Point Sets for 3D Classification and Segmentation*
+Created by Charles R. Qi, Hao Su, Kaichun Mo, Leonidas J. Guibas from Stanford University.
+
+![prediction example](https://github.com/charlesq34/pointnet/blob/master/doc/teaser.png)
+
+### Introduction
+This work is based on our [arXiv tech report](https://arxiv.org/abs/1612.00593), which is going to appear in CVPR 2017. We proposed a novel deep net architecture for point clouds (as unordered point sets). You can also check our [project webpage](http://stanford.edu/~rqi/pointnet) for a deeper introduction.
+
+Point cloud is an important type of geometric data structure. Due to its irregular format, most researchers transform such data to regular 3D voxel grids or collections of images. This, however, renders data unnecessarily voluminous and causes issues. In this paper, we design a novel type of neural network that directly consumes point clouds, which well respects the permutation invariance of points in the input. Our network, named PointNet, provides a unified architecture for applications ranging from object classification, part segmentation, to scene semantic parsing. Though simple, PointNet is highly efficient and effective.
+
+In this repository, we release code and data for training a PointNet classification network on point clouds sampled from 3D shapes, as well as for training a part segmentation network on ShapeNet Part dataset.
+
+### Citation
+If you find our work useful in your research, please consider citing:
+
+        @article{qi2016pointnet,
+          title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation},
+          author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J},
+          journal={arXiv preprint arXiv:1612.00593},
+          year={2016}
+        }
+
+### Installation
+
+Install TensorFlow. You may also need to install h5py. The code has been tested with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04.
+
+If you are using PyTorch, you can find a third-party PyTorch implementation here.
+
+To install h5py for Python:
+```bash
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+
+### Usage
+To train a model to classify point clouds sampled from 3D shapes:
+
+    python train.py
+
+By default, log files and network parameters are saved to the `log` folder. Point clouds of ModelNet40 models in HDF5 files will be automatically downloaded (416MB) to the data folder. Each point cloud contains 2048 points uniformly sampled from a shape surface. Each cloud is zero-mean and normalized into a unit sphere. There are also text files in `data/modelnet40_ply_hdf5_2048` specifying the ids of shapes in h5 files.
+
+To see HELP for the training script:
+
+    python train.py -h
+
+We can use TensorBoard to view the network architecture and monitor the training progress.
+
+    tensorboard --logdir log
+
+After the above training, we can evaluate the model and output some visualizations of the error cases.
+
+    python evaluate.py --visu
+
+Point clouds that are wrongly classified are saved to the `dump` folder by default. We visualize each point cloud by rendering it into three-view images.
+
+If you'd like to prepare your own data, you can refer to some helper functions in `utils/data_prep_util.py` for saving and loading HDF5 files.
+
+### Part Segmentation
+To train a model for object part segmentation, first download the data:
+
+    cd part_seg
+    sh download_data.sh
+
+The downloading script will download the ShapeNetPart dataset (around 1.08GB) and our prepared HDF5 files (around 346MB).
+
+Then you can run `train.py` and `test.py` in the `part_seg` folder for training and testing (computing mIoU for evaluation).
+
+### License
+Our code is released under the MIT License (see the LICENSE file for details).
+
+### Selected Projects that Use PointNet
+
+* PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space by Qi et al. (NIPS 2017) A hierarchical feature learning framework on point clouds. The PointNet++ architecture applies PointNet recursively on a nested partitioning of the input point set. It also proposes novel layers for point clouds with non-uniform densities.
+* Exploring Spatial Context for 3D Semantic Segmentation of Point Clouds by Engelmann et al. (ICCV 2017 workshop). This work extends PointNet for large-scale scene segmentation.
+* PCPNET: Learning Local Shape Properties from Raw Point Clouds by Guerrero et al. (arXiv). The work adapts PointNet for estimating local geometric properties (e.g. normal and curvature) in noisy point clouds.
+* VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection by Zhou et al. from Apple (arXiv). This work studies 3D object detection using LiDAR point clouds. It splits space into voxels, uses PointNet to learn local voxel features, and then uses a 3D CNN for region proposal, object classification and 3D bounding box estimation.
+* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al.
(arXiv) A novel framework for 3D object detection with RGB-D data. The method proposed has achieved first place on KITTI 3D object detection benchmark on all categories (last checked on 11/30/2017). diff --git a/TensorFlow2/built-in/keras_sample/evaluate.py b/TensorFlow2/built-in/keras_sample/evaluate.py new file mode 100644 index 000000000..749f8c7f8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/evaluate.py @@ -0,0 +1,199 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import tensorflow as tf +import numpy as np +import argparse +import socket +import importlib +import time +import os +import scipy.misc +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, 'models')) +sys.path.append(os.path.join(BASE_DIR, 'utils')) +import provider +import pc_util + + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') +parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]') +parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]') +parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]') +parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]') +parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]') +FLAGS = parser.parse_args() + + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MODEL_PATH = FLAGS.model_path +GPU_INDEX = FLAGS.gpu +MODEL = importlib.import_module(FLAGS.model) # import network module +DUMP_DIR = FLAGS.dump_dir +if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR) +LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +NUM_CLASSES = 40 +SHAPE_NAMES = [line.rstrip() for line in \ + open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] + +HOSTNAME = socket.gethostname() + +# ModelNet40 official train/test split +TRAIN_FILES = 
provider.getDataFiles( \ + os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt')) +TEST_FILES = provider.getDataFiles(\ + os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt')) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + +def evaluate(num_votes): + is_training = False + + with tf.device('/cpu:0'): + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + + # simple model + pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl) + loss = MODEL.get_loss(pred, labels_pl, end_points) + + # Add ops to save and restore all the variables. + saver = tf.compat.v1.train.Saver() + + # Create a session + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = True + sess = tf.compat.v1.Session(config=config) + + # Restore variables from disk. + saver.restore(sess, MODEL_PATH) + log_string("Model restored.") + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': loss} + + eval_one_epoch(sess, ops, num_votes) + + +def eval_one_epoch(sess, ops, num_votes=1, topk=1): + error_cnt = 0 + is_training = False + total_correct = 0 + total_seen = 0 + loss_sum = 0 + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w') + for fn in range(len(TEST_FILES)): + log_string('----'+str(fn)+'----') + current_data, current_label = provider.loadDataFile(TEST_FILES[fn]) + current_data = current_data[:,0:NUM_POINT,:] + current_label = np.squeeze(current_label) + print(current_data.shape) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + print(file_size) + + for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + cur_batch_size = end_idx - start_idx + + # Aggregating BEG + batch_loss_sum = 0 # sum of losses for the batch + batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes + batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes + for vote_idx in range(num_votes): + rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :], + vote_idx/float(num_votes) * np.pi * 2) + feed_dict = {ops['pointclouds_pl']: rotated_data, + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training} + loss_val, pred_val = sess.run([ops['loss'], ops['pred']], + feed_dict=feed_dict) + batch_pred_sum += pred_val + batch_pred_val = np.argmax(pred_val, 1) + for el_idx in range(cur_batch_size): + batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1 + batch_loss_sum += (loss_val * cur_batch_size / float(num_votes)) + # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1] + # pred_val = np.argmax(batch_pred_classes, 1) + pred_val = np.argmax(batch_pred_sum, 1) + # Aggregating END + + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + # correct = np.sum(pred_val_topk[:,0:topk] == label_val) + total_correct += correct + total_seen += cur_batch_size + loss_sum += batch_loss_sum + + for i in range(start_idx, end_idx): + l = current_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i-start_idx] == l) + fout.write('%d, %d\n' % 
(pred_val[i-start_idx], l)) + + if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP! + img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l], + SHAPE_NAMES[pred_val[i-start_idx]]) + img_filename = os.path.join(DUMP_DIR, img_filename) + output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :])) + scipy.misc.imsave(img_filename, output_img) + error_cnt += 1 + + log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) + log_string('eval accuracy: %f' % (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + + class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float) + for i, name in enumerate(SHAPE_NAMES): + log_string('%10s:\t%0.3f' % (name, class_accuracies[i])) + + + +if __name__=='__main__': + with tf.Graph().as_default(): + evaluate(num_votes=1) + LOG_FOUT.close() diff --git a/TensorFlow2/built-in/keras_sample/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/modelzoo_level.txt new file mode 100644 index 000000000..31529da2e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:OK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/provider.py b/TensorFlow2/built-in/keras_sample/provider.py new file mode 100644 index 000000000..18651c47f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/provider.py @@ -0,0 +1,165 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import sys
+import numpy as np
+import h5py
+        Input:
+          BxNx3 array, original batch of point clouds
+        Return:
+          BxNx3 array, jittered batch of point clouds
+    """
+    B, N, C = batch_data.shape
+    assert (clip > 0)
+    # Gaussian noise per point, clipped to [-clip, clip]
+    jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)
+    jittered_data += batch_data
+    return jittered_data
+
+
+# Get the list of data files
+def getDataFiles(list_filename):
+    return [line.rstrip() for line in open(list_filename)]
+
+
+# Load an h5 file (read-only)
+def load_h5(h5_filename):
+    f = h5py.File(h5_filename, 'r')
+    data = f['data'][:]
+    label = f['label'][:]
+    return (data, label)
+
+
+# Load a single data file
+def loadDataFile(filename):
+    return load_h5(filename)
+
+
+# Load an h5 file with data, label and segmentation fields
+def load_h5_data_label_seg(h5_filename):
+    f = h5py.File(h5_filename, 'r')
+    data = f['data'][:]
+    label = f['label'][:]
+    seg = f['pid'][:]
+    return (data, label, seg)
+
+
+# Load a data file together with its segmentation labels
+def loadDataFile_with_seg(filename):
+    return load_h5_data_label_seg(filename)
diff --git a/TensorFlow2/built-in/keras_sample/requirements.txt b/TensorFlow2/built-in/keras_sample/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/train.py b/TensorFlow2/built-in/keras_sample/train.py
new file mode 100644
index 000000000..4a6683530
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/train.py
@@ -0,0 +1,452 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# import npu_device
+# npu_device.open().as_default()
+
+
+import argparse
+# import math
+# import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+import importlib
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, 'models'))
+sys.path.append(os.path.join(BASE_DIR, 'utils'))
+import provider
+# import tf_util
+import time
+import datetime
+import ast
+from npu_device.compat.v1.npu_init import *
+import npu_device as npu
+npu.compat.enable_v1()
+
+starttime = datetime.datetime.now()
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
+parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
+parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
+parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
+parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
+parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
+parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
+parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
+parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
+parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
+parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
+parser.add_argument('--data_path', type=str, default='', help='data path')
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='precision mode, default is allow_mix_precision')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                    help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                    help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+                    help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,
+                    help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str,
+                    help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str,
+                    help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str,
+                    help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                    help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+                    help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str,
+                    help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,
+                    help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,
+                    help='auto_tune flag, default is False')
+FLAGS = parser.parse_args()
+
+
+BATCH_SIZE = FLAGS.batch_size
+NUM_POINT = FLAGS.num_point
+MAX_EPOCH = FLAGS.max_epoch
+BASE_LEARNING_RATE = FLAGS.learning_rate
+GPU_INDEX = FLAGS.gpu
+MOMENTUM = FLAGS.momentum
+OPTIMIZER = FLAGS.optimizer
+DECAY_STEP = FLAGS.decay_step
+DECAY_RATE = FLAGS.decay_rate
+
+MODEL = importlib.import_module(FLAGS.model) # import network module
+MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
+LOG_DIR = FLAGS.log_dir
+if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
+os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
+os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
+LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
+LOG_FOUT.write(str(FLAGS)+'\n')
+
+MAX_NUM_POINT = 2048
+NUM_CLASSES = 40
+
+BN_INIT_DECAY = 0.5
+BN_DECAY_DECAY_RATE = 0.5
+BN_DECAY_DECAY_STEP = float(DECAY_STEP)
+BN_DECAY_CLIP = 0.99
+
+HOSTNAME = socket.gethostname()
+
+# ModelNet40 official train/test split
+TRAIN_FILES = provider.getDataFiles( \
+    os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/train_files.txt'))
+TEST_FILES = provider.getDataFiles(\
+    os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/test_files.txt'))
+
+def log_string(out_str):
+    LOG_FOUT.write(out_str+'\n')
+    LOG_FOUT.flush()
+    print(out_str)
+
+
+# Compute an exponentially decayed learning rate. The learning rate should
+# decay as training progresses; tf.train.exponential_decay implements this.
+def get_learning_rate(batch):
+    # A large learning rate oscillates around the optimum while a small one
+    # converges slowly, so start from a larger initial rate and shrink it via
+    # decay_rate: a quick rough solution first, a stable model later.
+    learning_rate = tf.compat.v1.train.exponential_decay(
+                        BASE_LEARNING_RATE,  # Base learning rate.
+                        batch * BATCH_SIZE,  # Current index into the dataset.
+                        DECAY_STEP,          # Decay step.
+                        DECAY_RATE,          # Decay rate.
+                        staircase=True)
+    # Never let the decayed rate fall below 0.00001 (take the max of the
+    # decayed value and the floor).
+    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
+    return learning_rate
+
+
+# Compute the decayed batch-normalization decay. If the argument staircase is
+# True, then global_step / decay_steps is an integer division and the decayed
+# value follows a staircase function.
+def get_bn_decay(batch):
+    # Same staircase exponential-decay schedule as the learning rate, applied
+    # to the BN momentum.
+    bn_momentum = tf.compat.v1.train.exponential_decay(
+                      BN_INIT_DECAY,
+                      batch*BATCH_SIZE,
+                      BN_DECAY_DECAY_STEP,
+                      BN_DECAY_DECAY_RATE,
+                      staircase=True)
+    # BN decay is 1 - bn_momentum, clipped at 0.99.
+    bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
+    return bn_decay
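As a worked example of the two schedules defined above, here is a small plain-Python sketch (the constants mirror the script defaults; the step values are illustrative only):

```python
# Staircase exponential decay with a floor (learning rate) and the mirrored
# batch-norm decay schedule. With staircase semantics the exponent is an
# integer division, so the value drops in discrete steps.
def lr_at(step, base_lr=0.001, batch_size=32, decay_step=200000, decay_rate=0.7):
    lr = base_lr * decay_rate ** ((step * batch_size) // decay_step)
    return max(lr, 0.00001)  # same floor as get_learning_rate()

def bn_decay_at(step, init=0.5, batch_size=32, decay_step=200000, rate=0.5, clip=0.99):
    momentum = init * rate ** ((step * batch_size) // decay_step)
    return min(clip, 1 - momentum)

for step in (0, 10000, 50000, 100000):
    print(step, lr_at(step), bn_decay_at(step))
# The learning rate bottoms out at 1e-5 and bn_decay saturates at 0.99.
```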
+
+
+# Top-level training routine: bind inputs through placeholders, build the
+# whole training graph (data pipeline + loss + optimizer), set up logging,
+# then train.
+def train():
+    # Use this freshly created graph as the default graph of the TF runtime.
+    with tf.Graph().as_default():
+        # To switch to CPU execution call tf.device('/cpu:0'); TF does not
+        # distinguish CPU device ids, so 0 is enough. GPUs are distinguished:
+        # /gpu:0 and /gpu:1 denote two different cards.
+        # with tf.device('/gpu:'+str(GPU_INDEX)):
+        with tf.device('/gpu:0'):
+            # placeholder_inputs() from pointnet_cls.py returns placeholders
+            # for the point clouds and labels, given the batch size and point
+            # count. A placeholder only reserves a node in the graph; no data
+            # is bound yet, it is fed in later through feed_dict.
+            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
+            # Placeholder: are we currently training?
+            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
+            print(is_training_pl)
+
+            # Note the global_step=batch parameter to minimize.
+            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
+            batch = tf.Variable(0)
+            # BN decay schedule (defined above)
+            bn_decay = get_bn_decay(batch)
+            # Log scalar summaries (used when plotting loss/accuracy).
+            tf.compat.v1.summary.scalar('bn_decay', bn_decay)
+
+            # Get model and loss
+            # get_model() (models/pointnet_cls.py) builds the network pred,
+            # which outputs 40 class scores per cloud; the index of the
+            # largest score is the predicted label. A shared-weight MLP
+            # extracts per-point features, max pooling aggregates them into a
+            # fixed-size global feature that is invariant to the input point
+            # order, and a final MLP classifies that global feature.
+            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
+            # get_loss() from pointnet_cls
+            loss = MODEL.get_loss(pred, labels_pl, end_points)
+            tf.compat.v1.summary.scalar('loss', loss)
+
+            # tf.argmax over the class axis gives the predicted label;
+            # tf.equal compares it elementwise against the ground truth.
+            correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
+            # Reduce-sum to count the correct predictions in the batch.
+            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
+            tf.compat.v1.summary.scalar('accuracy', accuracy)
+
+            # Get training operator
+            # Decayed learning rate plus the chosen optimizer.
+            learning_rate = get_learning_rate(batch)
+            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
+            if OPTIMIZER == 'momentum':
+                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
+            elif OPTIMIZER == 'adam':
+                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
+            # minimize() does two things: (1) compute the gradients of the
+            # loss w.r.t. the variables, (2) apply them to update the weights.
+            # global_step starts at 0 and is incremented automatically on
+            # every update. Splitting minimize() into those two steps allows
+            # gradient correction against vanishing/exploding gradients.
+            train_op = optimizer.minimize(loss, global_step=batch)
+
+            # Add ops to save and restore all the variables.
+        saver = tf.compat.v1.train.Saver()
+
+        # Create a session
+        # Configure the session runtime parameters for the NPU.
+        config = tf.compat.v1.ConfigProto()
+        custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+        custom_op.name = 'NpuOptimizer'
+        custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode)
+        if FLAGS.data_dump_flag:
+            custom_op.parameter_map["enable_dump"].b = True
+            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path)
+            custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step)
+            custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
+        if FLAGS.over_dump:
+            custom_op.parameter_map["enable_dump_debug"].b = True
+            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path)
+            custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
+        if FLAGS.profiling:
+            custom_op.parameter_map["profiling_mode"].b = True
+            profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
+                                "training_trace":"on", \
+                                "task_trace":"on", \
+                                "aicpu":"on", \
+                                "aic_metrics":"PipeUtilization",\
+                                "fp_point":"", \
+                                "bp_point":""}'
+            custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
+        if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
+            custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file)
+        if FLAGS.fusion_off_flag:
+            custom_op.parameter_map["fusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file)
+        if FLAGS.auto_tune:
+            custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
+        config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # must be disabled explicitly
+        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF  # must be disabled explicitly
+
+        # Let TF allocate device memory on demand instead of grabbing it all.
+        config.gpu_options.allow_growth = True
+        # If the requested device does not exist, fall back to one that does
+        # (e.g. drop from GPU to CPU automatically).
+        config.allow_soft_placement = True
+        # Print which device each operation runs on.
+        config.log_device_placement = False
+        # Create the session that runs the graph.
+        sess = tf.compat.v1.Session(config=config)
+
+        # Add summary writers
+        #merged = tf.merge_all_summaries()
+        merged = tf.compat.v1.summary.merge_all()
+        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
+                                  sess.graph)
+        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
+
+        # Init variables
+        # Initialize the parameters and start training. train_one_epoch
+        # trains one epoch; eval_one_epoch then reports accuracy and loss on
+        # the test set. A checkpoint is saved every 10 epochs.
+        init = tf.compat.v1.global_variables_initializer()
+        # To fix the bug introduced in TF 0.12.1 as in
+        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
+        #sess.run(init)
+        # Run init for all global variables.
+        sess.run(init, {is_training_pl: True})
+
+        # ops is a dict passed as the interface into the train/eval epoch
+        # loops: pred is the network, loss the loss function, train_op the
+        # optimizer step, batch the current step counter.
+        ops = {'pointclouds_pl': pointclouds_pl,
+               'labels_pl': labels_pl,
+               'is_training_pl': is_training_pl,
+               'pred': pred,
+               'loss': loss,
+               'train_op': train_op,
+               'merged': merged,
+               'step': batch}
+
+        for epoch in range(MAX_EPOCH):
+            log_string('**** EPOCH %03d ****' % (epoch))
+            # Flush so progress refreshes in place.
+            sys.stdout.flush()
+
+            # Train for one epoch.
+            train_one_epoch(sess, ops, train_writer)
+            # Evaluate accuracy and loss on the test set after each epoch.
+            eval_one_epoch(sess, ops, test_writer)
+
+            # Save the variables to disk.
+            # Checkpoint the model every 10 epochs.
+            if epoch % 10 == 0:
+                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
+                log_string("Model saved in file: %s" % save_path)
+
+
+# provider.shuffle_data shuffles the samples and returns the shuffled data.
+# num_batches = file_size / BATCH_SIZE is the number of mini-batches one
+# epoch takes at the given BATCH_SIZE.
+def train_one_epoch(sess, ops, train_writer):
+    """ ops: dict mapping from string to tf ops """
+    is_training = True
+
+    # Shuffle train files
+    train_file_idxs = np.arange(0, len(TRAIN_FILES))
+    np.random.shuffle(train_file_idxs)
+
+    L = []
+    for fn in range(len(TRAIN_FILES)):
+        log_string('----' + str(fn) + '-----')
+        current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TRAIN_FILES[train_file_idxs[fn]]))
+        current_data = current_data[:,0:NUM_POINT,:]
+        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
+        current_label = np.squeeze(current_label)
+
+        file_size = current_data.shape[0]
+        num_batches = file_size // BATCH_SIZE
+
+        total_correct = 0
+        total_seen = 0
+        loss_sum = 0
+        endtime = datetime.datetime.now()
+        if fn == 0:
+            TOTLE_TIME = (endtime - starttime).seconds
+            L.append(TOTLE_TIME)
+
+        # Walk through the training set mini-batch by mini-batch, accumulating
+        # the number of correct predictions (total_correct), the number of
+        # samples seen (total_seen) and the summed loss (loss_sum).
+        for batch_idx in range(num_batches):
+            start_time = time.time()
+            start_idx = batch_idx * BATCH_SIZE
+            end_idx = (batch_idx+1) * BATCH_SIZE
+
+            # Augment batched point clouds by rotation and jittering
+            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
+            jittered_data = provider.jitter_point_cloud(rotated_data)
+            feed_dict = {ops['pointclouds_pl']: jittered_data,
+                         ops['labels_pl']: current_label[start_idx:end_idx],
+                         ops['is_training_pl']: is_training,}
+            # Run the graph through the session; ops['pred'] is the whole
+            # network and feed_dict supplies its data.
+            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
+                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
+            cost_time = time.time() - start_time
+            FPS = BATCH_SIZE / cost_time
+            train_writer.add_summary(summary, step)
+            pred_val = np.argmax(pred_val, 1)
+            correct = np.sum(pred_val == current_label[start_idx:end_idx])
+            total_correct += correct
+            total_seen += BATCH_SIZE
+            loss_sum += loss_val
+
+        # Log startup time, throughput, mean loss and accuracy.
+        log_string('TOTLE_TIME : %.2f' % (float(L[0])))
+        log_string('FPS : %.2f' % (float(FPS)))
+        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
+        log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
+def eval_one_epoch(sess, ops, test_writer):
+    """ ops: dict mapping from string to tf ops """
+    is_training = False
+    total_correct = 0
+    total_seen = 0
+    loss_sum = 0
+    total_seen_class = [0 for _ in range(NUM_CLASSES)]
+    total_correct_class = [0 for _ in range(NUM_CLASSES)]
+
+    for fn in range(len(TEST_FILES)):
+        log_string('----' + str(fn) + '-----')
+        current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TEST_FILES[fn]))
+        current_data = current_data[:,0:NUM_POINT,:]
+        current_label = np.squeeze(current_label)
+
+        file_size = current_data.shape[0]
+        num_batches = file_size // BATCH_SIZE
+
+        for batch_idx in range(num_batches):
+            start_idx = batch_idx * BATCH_SIZE
+            end_idx = (batch_idx+1) * BATCH_SIZE
+
+            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
+                         ops['labels_pl']: current_label[start_idx:end_idx],
+                         ops['is_training_pl']: is_training}
+            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
+                ops['loss'], ops['pred']], feed_dict=feed_dict)
+            pred_val = np.argmax(pred_val, 1)
+            correct = np.sum(pred_val == current_label[start_idx:end_idx])
+            total_correct += correct
+            total_seen += BATCH_SIZE
+            loss_sum += (loss_val*BATCH_SIZE)
+            for i in range(start_idx, end_idx):
+                l = current_label[i]
+                total_seen_class[l] += 1
+                total_correct_class[l] += (pred_val[i-start_idx] == l)
+
+    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
+    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
+    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
+
+
+
+if __name__ == "__main__":
+    train()
+    LOG_FOUT.close()
diff --git a/TensorFlow2/built-in/keras_sample/train_real.py b/TensorFlow2/built-in/keras_sample/train_real.py
new file mode 100644
index 000000000..34c60ca17
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/train_real.py
@@ -0,0 +1,381 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import argparse +import math +import h5py +import numpy as np +import tensorflow as tf +import socket +import importlib +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, 'models')) +sys.path.append(os.path.join(BASE_DIR, 'utils')) +import provider +import tf_util + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') +parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') +parser.add_argument('--num_point', type=int, default=4096, help='Point Number [256/512/1024/2048] [default: 1024]') +parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 250]') +parser.add_argument('--batch_size', type=int, default=5, help='Batch Size during training [default: 32]') +parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') +parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') +parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') +parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') +parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]') +FLAGS = parser.parse_args() + + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MAX_EPOCH = FLAGS.max_epoch +BASE_LEARNING_RATE = FLAGS.learning_rate +GPU_INDEX = FLAGS.gpu +MOMENTUM = FLAGS.momentum +OPTIMIZER = FLAGS.optimizer +DECAY_STEP = FLAGS.decay_step +DECAY_RATE = FLAGS.decay_rate + +MODEL = importlib.import_module(FLAGS.model) # import network module +MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py') +LOG_DIR = FLAGS.log_dir +if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) +os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def +os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure +LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +MAX_NUM_POINT = 4096 +NUM_CLASSES = 40 + +BN_INIT_DECAY = 0.5 +BN_DECAY_DECAY_RATE = 0.5 +BN_DECAY_DECAY_STEP = float(DECAY_STEP) +BN_DECAY_CLIP = 0.99 + +HOSTNAME = socket.gethostname() + +# ModelNet40 official train/test split +TRAIN_FILES = provider.getDataFiles( \ + os.path.join(BASE_DIR, 'data_real/train_files.txt')) +TEST_FILES = provider.getDataFiles(\ + os.path.join(BASE_DIR, 'data_real/test_files.txt')) +print(TRAIN_FILES) +print(TEST_FILES) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + + +# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。 +# tf.train.exponential_decay函数实现指数衰减学习率。 +def get_learning_rate(batch): + # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 + # 通过tf.train.exponential_decay函数实现指数衰减学习率。 + # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, + # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 + # tf.train.exponential_decay就是用来实现这个功能。 + # + # 步骤: + # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); + # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); + learning_rate = tf.compat.v1.train.exponential_decay( + BASE_LEARNING_RATE, # Base learning rate. + batch * BATCH_SIZE, # Current index into the dataset. + DECAY_STEP, # Decay step. + DECAY_RATE, # Decay rate. 
+ staircase=True) + # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大) + learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! + return learning_rate + + +# 取得bn衰减 +# if the argument staircase is True, +# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function. +# 计算衰减的Batch Normalization 的 decay。 +def get_bn_decay(batch): + # 指数衰减法 + + # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 + # 通过tf.train.exponential_decay函数实现指数衰减学习率。 + # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, + # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 + # tf.train.exponential_decay就是用来实现这个功能。 + # + # 步骤: + # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); + # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); + bn_momentum = tf.compat.v1.train.exponential_decay( + BN_INIT_DECAY, + batch*BATCH_SIZE, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + # bn衰减0.99和1-衰减后的动量,取最小 + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + return bn_decay + + +# 初始运行的训练函数。 +# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练. +def train(): + # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图 + with tf.Graph().as_default(): + # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号, + # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。 + # with tf.device('/gpu:'+str(GPU_INDEX)): + with tf.device('/cpu:0'): + # 使用了pointne_cls.py的placeholder_inputs()方法。 + # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。 + # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型, + # 它只会分配必要的内存,用于传入外部数据。 + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + # 向指定好的对象中喂入数据:tf.placeholder() + # 取得占位符:是否在训练。 + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + print(is_training_pl) + + # Note the global_step=batch parameter to minimize. + # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains. 
+ # 将 global_step = batch 参数最小化。 + # 这是在告诉优化器 在每次训练时 为你有用地增加'batch'参数。 + # 定义 batch = 0 + batch = tf.Variable(0) + # 取得bn衰减(自定义方法) + bn_decay = get_bn_decay(batch) + # 用来显示标量信息,一般在画loss,accuary时会用到这个函数。 + tf.compat.v1.summary.scalar('bn_decay', bn_decay) + + # Get model and loss + # 创建的数据处理网络为pred,调用 model\pointnet_cls 下的get_model()得到。由get_model()可知, + # pred的维度为B×N×40,40为分出的类别Channel数,对应40个分类标签。每个点的这40个值最大的一个的下标即为所预测的分类标签。 + # 首先使用共享参数的MLP对每个点进行特征提取,再使用MaxPooling在特征维进行池化操作, + # 使得网络对不同数量点的点云产生相同维度的特征向量,且输出对输入点的顺序产生不变性。 + # 在得到固定维度的特征向量之后,再使用一个MLP对其进行分类。 + pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) + # 调用pointnet_cls下的get_loss() + loss = MODEL.get_loss(pred, labels_pl, end_points) + tf.compat.v1.summary.scalar('loss', loss) + + # tf.argmax(pred, 2) 返回pred C 这个维度的最大值索引返回相同维度的bool值矩阵 + # tf.equal() 比较两个张量对应位置是否相等 + correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64)) + # 压缩求和,用于降维 + accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE) + tf.compat.v1.summary.scalar('accuracy', accuracy) + + # Get training operator + # 取得学习率(自定义方法),获得衰减后的学习率,以及选择优化器optimizer。 + learning_rate = get_learning_rate(batch) + tf.compat.v1.summary.scalar('learning_rate', learning_rate) + if OPTIMIZER == 'momentum': + optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) + elif OPTIMIZER == 'adam': + optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate) + # minimize的内部存在两个操作:(1)计算各个变量的梯度 (2)用梯度更新这些变量的值 + # (1)计算loss对指定val_list的梯度(导数),返回元组列表[(gradient,variable),…] + # (2)用计算得到的梯度来更新对应的变量(权重) + # 注意:在程序中global_step初始化为0,每次更新参数时,自动加1 + # 将minimize()分成两个步骤的原因:在某种情况下对梯度进行修正,防止梯度消失或者梯度爆炸 + train_op = optimizer.minimize(loss, global_step=batch) + + # Add ops to save and restore all the variables. 
+ saver = tf.compat.v1.train.Saver() + + # Create a session + # 配置session 运行参数。 + # 创建sess的时候对sess进行参数配置 + config = tf.compat.v1.ConfigProto() + # =True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。 + config.gpu_options.allow_growth = True + # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行 + config.allow_soft_placement = True + # 在终端打印出各项操作是在哪个设备上运行的 + config.log_device_placement = False + # 创建 sess, 才能运行框架 + sess = tf.compat.v1.Session(config=config) + + # Add summary writers + #merged = tf.merge_all_summaries() + merged = tf.compat.v1.summary.merge_all() + train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'), + sess.graph) + test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test')) + + # Init variables + # 初始化参数,开始训练 + # train_one_epoch 函数用来训练一个epoch,eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的 + # accuracy和loss。每10个epoch保存1次模型。 + init = tf.compat.v1.global_variables_initializer() + # To fix the bug introduced in TF 0.12.1 as in + # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1 + #sess.run(init) + # 运行sess初始化所有的全局变量 + sess.run(init, {is_training_pl: True}) + + # ops 是一个字典,作为接口传入训练和评估 epoch 循环中。 + # pred 是数据处理网络模块;loss 是 损失函数;train_op 是优化器;batch 是当前的批次 + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': loss, + 'train_op': train_op, + 'merged': merged, + 'step': batch} + + for epoch in range(MAX_EPOCH): + # log(自定义方法) + log_string('**** EPOCH %03d ****' % (epoch)) + # 在同一个位置刷新输出 + sys.stdout.flush() + + # 训练一个批次(自定义方法) + # train_one_epoch 函数用来训练一个epoch + train_one_epoch(sess, ops, train_writer) + # 评估一个批次(自定义方法) + # eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的accuracy和loss + eval_one_epoch(sess, ops, test_writer) + + # Save the variables to disk. + # Save the variables to disk.每10个epoch保存1次模型 + if epoch % 10 == 0: + save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) + # log(自定义方法) + log_string("Model saved in file: %s" % save_path) + + +# provider.shuffle_data 函数随机打乱数据,返回打乱后的数据。 +# num_batches = file_size/BATCH_SIZE,计算在指定BATCH_SIZE下,训练1个epoch 需要几个mini-batch训练。 +def train_one_epoch(sess, ops, train_writer): + """ ops: dict mapping from string to tf ops """ + is_training = True + + # Shuffle train files + # 随机打乱训练数据 + train_file_idxs = np.arange(0, len(TRAIN_FILES)) + np.random.shuffle(train_file_idxs) + + for fn in range(len(TRAIN_FILES)): + log_string('----' + str(fn) + '-----') + current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]]) + current_data = current_data[:,0:NUM_POINT,:] + current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) + current_label = np.squeeze(current_label) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + + # 在一个epoch 中逐个mini-batch训练直至遍历完一遍训练集。计算总分类正确数total_correct和已遍历样本数 + # total_senn,总损失loss_sum. 
+ for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + + # Augment batched point clouds by rotation and jittering + # 调用provider中rotate_point_cloud + rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :]) + jittered_data = provider.jitter_point_cloud(rotated_data) + feed_dict = {ops['pointclouds_pl']: jittered_data, + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training,} + summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict) + # 训练,使用 tf 的 session 运行设计的框架,ops['pred'] 为整个网络,feed_dict 为网络提供的数据 + train_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + total_correct += correct + total_seen += BATCH_SIZE + loss_sum += loss_val + + # 记录平均loss,以及平均accuracy。 + log_string('mean loss: %f' % (loss_sum / float(num_batches))) + log_string('accuracy: %f' % (total_correct / float(total_seen))) + + +def eval_one_epoch(sess, ops, test_writer): + """ ops: dict mapping from string to tf ops """ + is_training = False + total_correct = 0 + total_seen = 0 + loss_sum = 0 + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + + for fn in range(len(TEST_FILES)): + log_string('----' + str(fn) + '-----') + current_data, current_label = provider.loadDataFile(TEST_FILES[fn]) + current_data = current_data[:,0:NUM_POINT,:] + current_label = np.squeeze(current_label) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + + for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + + feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training} + summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['loss'], ops['pred']], feed_dict=feed_dict) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + total_correct += correct + total_seen += BATCH_SIZE + loss_sum += (loss_val*BATCH_SIZE) + for i in range(start_idx, end_idx): + l = current_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i-start_idx] == l) + + log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) + log_string('eval accuracy: %f'% (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + + + +if __name__ == "__main__": + train() + LOG_FOUT.close() -- Gitee From 8d4827b97b6bc14c2ec3ddd2d539b473c4a7f469 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:28:45 +0000 Subject: [PATCH 10/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/.gitignore?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/.gitignore | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/.gitignore diff --git a/TensorFlow2/built-in/keras_sample/.gitignore b/TensorFlow2/built-in/keras_sample/.gitignore deleted file mode 100644 index 8efb80c9a..000000000 --- 
a/TensorFlow2/built-in/keras_sample/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/data/* -/log/* -- Gitee From bb5dccb349692bd7c37daaa2b45419ac70869a9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:28:55 +0000 Subject: [PATCH 11/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/LICENSE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/LICENSE | 51 ----------------------- 1 file changed, 51 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/LICENSE diff --git a/TensorFlow2/built-in/keras_sample/LICENSE b/TensorFlow2/built-in/keras_sample/LICENSE deleted file mode 100644 index e93be0a6b..000000000 --- a/TensorFlow2/built-in/keras_sample/LICENSE +++ /dev/null @@ -1,51 +0,0 @@ -PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation. - -Copyright (c) 2017, Geometric Computation Group of Stanford University - -The MIT License (MIT) - -Copyright (c) 2017 Charles R. Qi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-
-PointNet:针对3D分类和分割的点集深度学习。
-
-斯坦福大学几何计算小组(c)2017版权所有
-
-MIT许可证(MIT)
-
-版权所有(c)2017 Charles R.Qi
-
-特此授予获得副本的任何人免费的许可
-软件和相关文档文件(以下简称“软件”)的交易
-在软件中不受限制,包括但不限于权利
-使用,复制,修改,合并,发布,分发,再许可和/或出售
-本软件的副本,并允许本软件所针对的人
-具备以下条件:
-
-以上版权声明和此许可声明应包含在所有
-复制或实质性的软件部分。
-
-本软件按“原样”提供,不提供任何形式的明示或明示保证。
-暗示(包括但不限于适销性的保证),
-适用于特定目的和非侵权。在任何情况下都不会
-作者或版权持有人对任何索赔,损害或其他责任
-无论是由于合同,侵权或其他形式的诉讼而引起的责任,
-与软件或软件的使用或其他交易无关或与之有关
-软件。
\ No newline at end of file
-- Gitee
From 8f49f56d97e674219fe397ad43549d2a4ea3886e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:05 +0000
Subject: =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
 =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/README.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 TensorFlow2/built-in/keras_sample/README.md | 233 --------------------
 1 file changed, 233 deletions(-)
 delete mode 100644 TensorFlow2/built-in/keras_sample/README.md

diff --git a/TensorFlow2/built-in/keras_sample/README.md b/TensorFlow2/built-in/keras_sample/README.md
deleted file mode 100644
index 2e27ca0f6..000000000
--- a/TensorFlow2/built-in/keras_sample/README.md
+++ /dev/null
@@ -1,233 +0,0 @@
-- [Basic Information](#基本信息.md)
-- [Overview](#概述.md)
-- [Training Environment Setup](#训练环境准备.md)
-- [Quick Start](#快速上手.md)
-- [Transfer Learning Guide](#迁移学习指导.md)
-- [Advanced Reference](#高级参考.md)
-
-
-<h2 id="基本信息.md">Basic Information</h2>
-
-**Publisher: Huawei**
-
-**Application Domain: Instance Segmentation**
-
-**Version: 1.1**
-
-**Modified: 2022.04.11**
-
-**Size: 43M**
-
-**Framework: TensorFlow_2.6.2**
-
-**Model Format: ckpt**
-
-**Precision: Mixed**
-
-**Processor: Ascend 910**
-
-**Categories: Official**
-
-**Description: Training code for a 3D point-cloud classification and segmentation network based on TensorFlow 2.X**
-
-
-<h2 id="概述.md">Overview</h2>
-
-## Summary
-
-A point cloud is a very important geometric data structure. Because of its irregular format, most researchers convert point clouds into regular 3D voxel grids or into collections of 2D images from different viewpoints. This conversion inflates the data volume and brings a series of problems with it. PointNet is a neural network that consumes point clouds directly while accounting for the permutation invariance of the input point sequence. It provides a unified architecture for classification, part segmentation and semantic parsing. Although the network is simple, it is very effective: experimentally it outperforms classical methods, or at least matches them. The paper also analyzes what the network learns and why it stays stable when the input is perturbed to a certain degree.
-
-
-  Reference paper:
-
-  https://arxiv.org/abs/1612.00593
-
-  Reference implementation:
-  https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py
-
-
-  Ascend AI Processor adaptation:
-  skip
-
-
-  To fetch the code at a given commit_id via Git:
-  ```
-  git clone {repository_url}        # clone the repository
-  cd {repository_name}              # enter the model's code directory
-  git checkout {branch}             # switch to the branch
-  git reset --hard {commit_id}      # pin the code to the commit_id
-  cd {code_path}                    # enter the model code path; skip if the repo holds only this model
-  ```
-
-
-
-## Default Configuration
-
-
-- Network structure
-  - A max-pooling layer (a symmetric function) aggregates feature information from all points.
-  - After the global point-cloud feature vector is computed, the global feature is concatenated back onto each per-point feature, and new per-point features are extracted from the merged features, so every point feature carries both local and global information.
-  - A small network (T-Net) predicts an affine transformation matrix that is applied directly to the input point coordinates. The small network resembles the large one and is built from the same basic modules: per-point feature extraction, max pooling and fully connected layers.
-
-- Training hyperparameters (single card):
-  - Batch size: 32
-  - learning_rate: 0.0015
-  - num_point: 2048
-  - Train epoch: 250
-
-
-## Supported Features
-
-| Feature              | Supported |
-|----------------------|-----------|
-| Distributed training | No        |
-| Mixed precision      | Yes       |
-| Data parallelism     | No        |
-
-## Mixed Precision Training
-
-The Ascend 910 AI Processor provides automatic mixed precision: following a built-in optimization policy, it automatically lowers selected float32 operators to float16, which improves system performance and reduces memory usage with very little precision loss.
-
-## Enabling Mixed Precision
-Sample configuration code:
-
-```
-    config_proto = tf.ConfigProto(allow_soft_placement=True)
-    custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
-    custom_op.name = 'NpuOptimizer'
-    custom_op.parameter_map["use_off_line"].b = True
-    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
-    config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
-    session_config = npu_config_proto(config_proto=config_proto)
-```
-
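The snippet above uses the TF1-compat ConfigProto path. For the TF2 scripts in this sample, the same switch is typically flipped through the npu_device adapter before any op runs; a minimal sketch (assumes the npu_device package shipped with CANN; verify the option names against your installed version):

```python
# Minimal TF2-style sketch of enabling mixed precision on the NPU.
import npu_device

npu_device.global_options().precision_mode = 'allow_mix_precision'
npu_device.open().as_default()  # subsequent TF2/Keras ops now run on the NPU
```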
-<h2 id="训练环境准备.md">
-Training Environment Preparation
-</h2>
-
-- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
-- Run the following command to install the dependencies.
-```
-pip3 install -r requirements.txt
-```
-Note: the dependency file requirements.txt is located in the model's root directory.
-
-
-<h2 id="快速上手.md">
-Quick Start
-</h2>
-
-## Dataset preparation
-
-1. Training uses the modelnet40_ply_hdf5_2048 dataset: point clouds sampled from ModelNet40 shapes and stored as HDF5 files. Each point cloud contains 2048 points uniformly sampled from the shape surface; each cloud is zero-mean and normalized into a unit sphere.
-2. Install h5py. (The upstream reference code was tested on Ubuntu 14.04 with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1.)
-```
-sudo apt-get install libhdf5-dev
-sudo pip install h5py
-```
-3. By default, log files and network parameters are saved to the log folder. The point clouds of the ModelNet40 models in HDF5 files are downloaded automatically (416MB) to the data folder.
-
-## Model training
-- Click "立即下载" (Download now) and choose a suitable way to download the source package.
-- Start training; the expected dataset layout appears in step 2.2, and a loading sketch follows this section.
-
-    1. Before launching training, configure the environment variables required at run time. For details, see:
-
-       [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
-
-    2. Single-card training
-
-       2.1 Set the single-card training parameters (the script is PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh), for example:
-
-       ```
-       batch_size=32
-       # training epochs
-       train_epochs=250
-       # learning rate
-       learning_rate=0.0015
-       ```
-
-       2.2 Single-card training command (run under PointNet_ID2913_for_TensorFlow2.X/test):
-
-       ```
-       Run "export ASCEND_DEVICE_ID=0" (0~7) in the terminal to pick the card used for single-card training.
-       bash train_full_1p.sh --data_path=xx
-       The dataset must be HDF5 files; data_path must point at the data directory itself, e.g. --data_path=/home/data
-       ├─data
-         ├─ply_data_test0.h5*
-         ├─ply_data_test_0_id2file.json*
-         ├─ply_data_test1.h5*
-         ├─ply_data_test_1_id2file.json*
-         ├─ply_data_train0.h5*
-         ├─ply_data_train_0_id2file.json*
-         ├─ply_data_train1.h5*
-         ├─ply_data_train_1_id2file.json*
-         ├─ply_data_train2.h5*
-         ├─ply_data_train_2_id2file.json*
-         ├─ply_data_train3.h5*
-         ├─ply_data_train_3_id2file.json*
-         ├─ply_data_train4.h5*
-         ├─ply_data_train_4_id2file.json*
-         ├─shape_names.txt*
-         ├─test_files.txt*
-         ├─train_files.txt*
-       ```
-
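-A minimal loading sketch matching the layout above and the way provider.py reads each HDF5 shard (the data_path value and the flat file layout are taken from the example above, so treat the path handling as illustrative):
-
-```
-import os
-import h5py
-
-data_path = '/home/data'  # illustrative; pass the real path via --data_path
-
-# train_files.txt lists the HDF5 shards, one path per line.
-with open(os.path.join(data_path, 'train_files.txt')) as f:
-    shards = [line.strip() for line in f if line.strip()]
-
-# Read one shard the same way provider.load_h5 does.
-with h5py.File(os.path.join(data_path, os.path.basename(shards[0])), 'r') as h5:
-    points = h5['data'][:]   # (num_clouds, 2048, 3) xyz coordinates
-    labels = h5['label'][:]  # (num_clouds, 1) integer class ids
-print(points.shape, labels.shape)
-```
-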
-<h2 id="迁移学习指导.md">
-Transfer Learning Guide
-</h2>
-
-- Dataset preparation.
-
-    1. Data acquisition.
-       See "Dataset preparation" under "Quick Start".
-
-- Model training.
-
-    Refer to the "Quick Start" section; a checkpoint-restore sketch follows below.
-
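-To fine-tune from an existing checkpoint instead of training from scratch, the restore pattern used by evaluate.py applies. A sketch, assuming the same MODEL, BATCH_SIZE and NUM_POINT globals as the training scripts and an illustrative checkpoint path:
-
-```
-import tensorflow as tf
-
-# Rebuild the graph exactly as train.py does, then load the saved weights.
-pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
-is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
-pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
-
-saver = tf.compat.v1.train.Saver()
-sess = tf.compat.v1.Session()
-saver.restore(sess, 'log/model.ckpt')  # illustrative checkpoint path
-```
-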
-<h2 id="高级参考.md">
-Advanced Reference
-</h2>
-
-## Scripts and sample code
-
-    ├── README.md                   //documentation
-    ├── requirements.txt            //dependencies
-    ├── modelzoo_level.txt          //status file
-    ├── provider.py                 //dataset processing script
-    ├── train.py                    //network training script
-    ├── models                      //network definition scripts
-    |—— pointnet_cls.py
-    |—— pointnet_cls_basic.py
-    |—— pointnet_seg.py
-    |—— transform_nets.py
-    ├── test
-    |    |—— train_full_1p.sh        //single-card full-training script
-    |    |—— train_performance_1p.sh //single-card performance-training script
-    ...
-
-## Script parameters
-
-```
-batch_size           training batch size
-learning_rate        initial learning rate
-max_epochs           maximum number of training epochs
-num_point            number of points uniformly sampled from each shape surface
-precision_mode       precision mode, default "allow_mix_precision"
-over_dump            whether to enable overflow detection, default False
-data_dump_flag       whether to dump data, default False
-data_dump_step       data dump step, default 10
-profiling            whether to enable profiling for performance debugging, default False
-profiling_dump_path  the path to save profiling data
-over_dump_path       the path to save overflow dump data
-data_dump_path       the path to save dump data
-use_mixlist          whether to use a mixed-precision op list, default False
-fusion_off_flag      whether to disable operator fusion, default False
-mixlist_file         mixlist file name, default ops_info.json
-fusion_off_file      fusion_off file name, default fusion_switch.cfg
-auto_tune            whether to enable auto tune, default False
-```
-
-## Training process
-
-Start single-card training with the command given in "Model training".
-Set data_path in the training script (train_full_1p.sh) to the path of the training dataset; see the example in "Model training" for the full procedure.
-- 
Gitee


From 1f3182da6964cbd46e8ad016c47fdd56e1070c5b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:29:12 +0000
Subject: [PATCH 13/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
 =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/evaluate.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 TensorFlow2/built-in/keras_sample/evaluate.py | 199 ------------------
 1 file changed, 199 deletions(-)
 delete mode 100644 TensorFlow2/built-in/keras_sample/evaluate.py

diff --git a/TensorFlow2/built-in/keras_sample/evaluate.py b/TensorFlow2/built-in/keras_sample/evaluate.py
deleted file mode 100644
index 749f8c7f8..000000000
--- a/TensorFlow2/built-in/keras_sample/evaluate.py
+++ /dev/null
@@ -1,199 +0,0 @@
-#
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import tensorflow as tf -import numpy as np -import argparse -import socket -import importlib -import time -import os -import scipy.misc -import sys -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(BASE_DIR) -sys.path.append(os.path.join(BASE_DIR, 'models')) -sys.path.append(os.path.join(BASE_DIR, 'utils')) -import provider -import pc_util - - -parser = argparse.ArgumentParser() -parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') -parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') -parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]') -parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]') -parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]') -parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]') -parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]') -FLAGS = parser.parse_args() - - -BATCH_SIZE = FLAGS.batch_size -NUM_POINT = FLAGS.num_point -MODEL_PATH = FLAGS.model_path -GPU_INDEX = FLAGS.gpu -MODEL = importlib.import_module(FLAGS.model) # import network module -DUMP_DIR = FLAGS.dump_dir -if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR) -LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w') -LOG_FOUT.write(str(FLAGS)+'\n') - -NUM_CLASSES = 40 -SHAPE_NAMES = [line.rstrip() for line in \ - open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] - -HOSTNAME = socket.gethostname() - -# ModelNet40 official train/test split -TRAIN_FILES = provider.getDataFiles( \ - os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt')) -TEST_FILES = provider.getDataFiles(\ - os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt')) - -def log_string(out_str): - LOG_FOUT.write(out_str+'\n') - LOG_FOUT.flush() - print(out_str) - -def evaluate(num_votes): - is_training = False - - with tf.device('/cpu:0'): - pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) - is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) - - # simple model - pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl) - loss = MODEL.get_loss(pred, labels_pl, end_points) - - # Add ops to save and restore all the variables. - saver = tf.compat.v1.train.Saver() - - # Create a session - config = tf.compat.v1.ConfigProto() - config.gpu_options.allow_growth = True - config.allow_soft_placement = True - config.log_device_placement = True - sess = tf.compat.v1.Session(config=config) - - # Restore variables from disk. 
- saver.restore(sess, MODEL_PATH) - log_string("Model restored.") - - ops = {'pointclouds_pl': pointclouds_pl, - 'labels_pl': labels_pl, - 'is_training_pl': is_training_pl, - 'pred': pred, - 'loss': loss} - - eval_one_epoch(sess, ops, num_votes) - - -def eval_one_epoch(sess, ops, num_votes=1, topk=1): - error_cnt = 0 - is_training = False - total_correct = 0 - total_seen = 0 - loss_sum = 0 - total_seen_class = [0 for _ in range(NUM_CLASSES)] - total_correct_class = [0 for _ in range(NUM_CLASSES)] - fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w') - for fn in range(len(TEST_FILES)): - log_string('----'+str(fn)+'----') - current_data, current_label = provider.loadDataFile(TEST_FILES[fn]) - current_data = current_data[:,0:NUM_POINT,:] - current_label = np.squeeze(current_label) - print(current_data.shape) - - file_size = current_data.shape[0] - num_batches = file_size // BATCH_SIZE - print(file_size) - - for batch_idx in range(num_batches): - start_idx = batch_idx * BATCH_SIZE - end_idx = (batch_idx+1) * BATCH_SIZE - cur_batch_size = end_idx - start_idx - - # Aggregating BEG - batch_loss_sum = 0 # sum of losses for the batch - batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes - batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes - for vote_idx in range(num_votes): - rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :], - vote_idx/float(num_votes) * np.pi * 2) - feed_dict = {ops['pointclouds_pl']: rotated_data, - ops['labels_pl']: current_label[start_idx:end_idx], - ops['is_training_pl']: is_training} - loss_val, pred_val = sess.run([ops['loss'], ops['pred']], - feed_dict=feed_dict) - batch_pred_sum += pred_val - batch_pred_val = np.argmax(pred_val, 1) - for el_idx in range(cur_batch_size): - batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1 - batch_loss_sum += (loss_val * cur_batch_size / float(num_votes)) - # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1] - # pred_val = np.argmax(batch_pred_classes, 1) - pred_val = np.argmax(batch_pred_sum, 1) - # Aggregating END - - correct = np.sum(pred_val == current_label[start_idx:end_idx]) - # correct = np.sum(pred_val_topk[:,0:topk] == label_val) - total_correct += correct - total_seen += cur_batch_size - loss_sum += batch_loss_sum - - for i in range(start_idx, end_idx): - l = current_label[i] - total_seen_class[l] += 1 - total_correct_class[l] += (pred_val[i-start_idx] == l) - fout.write('%d, %d\n' % (pred_val[i-start_idx], l)) - - if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP! 
- img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l], - SHAPE_NAMES[pred_val[i-start_idx]]) - img_filename = os.path.join(DUMP_DIR, img_filename) - output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :])) - scipy.misc.imsave(img_filename, output_img) - error_cnt += 1 - - log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) - log_string('eval accuracy: %f' % (total_correct / float(total_seen))) - log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) - - class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float) - for i, name in enumerate(SHAPE_NAMES): - log_string('%10s:\t%0.3f' % (name, class_accuracies[i])) - - - -if __name__=='__main__': - with tf.Graph().as_default(): - evaluate(num_votes=1) - LOG_FOUT.close() -- Gitee From 493320e8b7309fdf70beb2cd210e74de6ed07dfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:29:19 +0000 Subject: [PATCH 14/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/provider.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/provider.py | 165 ------------------ 1 file changed, 165 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/provider.py diff --git a/TensorFlow2/built-in/keras_sample/provider.py b/TensorFlow2/built-in/keras_sample/provider.py deleted file mode 100644 index 18651c47f..000000000 --- a/TensorFlow2/built-in/keras_sample/provider.py +++ /dev/null @@ -1,165 +0,0 @@ -# -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import sys -import numpy as np -import h5py - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(BASE_DIR) - -# Download dataset for point cloud classification -# 拼接data路径 -''' -DATA_DIR = os.path.join(BASE_DIR, 'data') -# 如果没有路径,则创建文件夹 -if not os.path.exists(DATA_DIR): - os.mkdir(DATA_DIR) -# 若不存在指定的文件夹,则从指定url下载压缩包,并解压缩 -# 实际上不好用,zipfile下载不下来。所以mv和rm就都报错了。 -if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')): - www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip' - zipfile = os.path.basename(www) - os.system('wget %s; unzip %s' % (www, zipfile)) - os.system('mv %s %s' % (zipfile[:-4], DATA_DIR)) - os.system('rm %s' % (zipfile)) -''' - -# 把数据随机打乱 -def shuffle_data(data, labels): - """ Shuffle data and labels. - Input: - data: B,N,... numpy array - label: B,... numpy array - Return: - shuffled data, label and shuffle indices - """ - # 取标签长度 - idx = np.arange(len(labels)) - # 打乱索引 - np.random.shuffle(idx) - # 返回打乱的数据,标签和索引 - return data[idx, ...], labels[idx], idx - - -# 旋转点云 -def rotate_point_cloud(batch_data): - # 随机旋转点云以扩大数据集 - # 旋转是基于向上方向的每个形状 - # 输入: - # BxNx3阵列,原始batch的点云 - # 返回: - # BxNx3阵列,旋转的点云batch - """ Randomly rotate the point clouds to augument the dataset - rotation is per shape based along up direction - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, 0, sinval], - [0, 1, 0], - [-sinval, 0, cosval]]) - shape_pc = batch_data[k, ...] - rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - return rotated_data - - -# 按角度旋转点云 -def rotate_point_cloud_by_angle(batch_data, rotation_angle): - """ Rotate the point cloud along up direction with certain angle. - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - # rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, 0, sinval], - [0, 1, 0], - [-sinval, 0, cosval]]) - shape_pc = batch_data[k, ...] - rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - return rotated_data - - -# 抖动点云 -def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): - """ Randomly jitter points. jittering is per point. 
- Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, jittered batch of point clouds - """ - B, N, C = batch_data.shape - assert (clip > 0) - jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip) - jittered_data += batch_data - return jittered_data - - -# 获得复数个数据文件 -def getDataFiles(list_filename): - return [line.rstrip() for line in open(list_filename)] - - -# 加载h5文件 -def load_h5(h5_filename): - f = h5py.File(h5_filename) - data = f['data'][:] - label = f['label'][:] - return (data, label) - - -# 获得单个数据文件 -def loadDataFile(filename): - return load_h5(filename) - - -# 加载h5数据标签段 -def load_h5_data_label_seg(h5_filename): - f = h5py.File(h5_filename) - data = f['data'][:] - label = f['label'][:] - seg = f['pid'][:] - return (data, label, seg) - - -# 用seg加载数据文件 -def loadDataFile_with_seg(filename): - return load_h5_data_label_seg(filename) -- Gitee From 04a1e98a2dfd928d2b66cd02733528c6f7c9a111 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:29:25 +0000 Subject: [PATCH 15/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/train=5Freal.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../built-in/keras_sample/train_real.py | 381 ------------------ 1 file changed, 381 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/train_real.py diff --git a/TensorFlow2/built-in/keras_sample/train_real.py b/TensorFlow2/built-in/keras_sample/train_real.py deleted file mode 100644 index 34c60ca17..000000000 --- a/TensorFlow2/built-in/keras_sample/train_real.py +++ /dev/null @@ -1,381 +0,0 @@ -# -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import math -import h5py -import numpy as np -import tensorflow as tf -import socket -import importlib -import os -import sys -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(BASE_DIR) -sys.path.append(os.path.join(BASE_DIR, 'models')) -sys.path.append(os.path.join(BASE_DIR, 'utils')) -import provider -import tf_util - -parser = argparse.ArgumentParser() -parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') -parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') -parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') -parser.add_argument('--num_point', type=int, default=4096, help='Point Number [256/512/1024/2048] [default: 1024]') -parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 250]') -parser.add_argument('--batch_size', type=int, default=5, help='Batch Size during training [default: 32]') -parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') -parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') -parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') -parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') -parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]') -FLAGS = parser.parse_args() - - -BATCH_SIZE = FLAGS.batch_size -NUM_POINT = FLAGS.num_point -MAX_EPOCH = FLAGS.max_epoch -BASE_LEARNING_RATE = FLAGS.learning_rate -GPU_INDEX = FLAGS.gpu -MOMENTUM = FLAGS.momentum -OPTIMIZER = FLAGS.optimizer -DECAY_STEP = FLAGS.decay_step -DECAY_RATE = FLAGS.decay_rate - -MODEL = importlib.import_module(FLAGS.model) # import network module -MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py') -LOG_DIR = FLAGS.log_dir -if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) -os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def -os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure -LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') -LOG_FOUT.write(str(FLAGS)+'\n') - -MAX_NUM_POINT = 4096 -NUM_CLASSES = 40 - -BN_INIT_DECAY = 0.5 -BN_DECAY_DECAY_RATE = 0.5 -BN_DECAY_DECAY_STEP = float(DECAY_STEP) -BN_DECAY_CLIP = 0.99 - -HOSTNAME = socket.gethostname() - -# ModelNet40 official train/test split -TRAIN_FILES = provider.getDataFiles( \ - os.path.join(BASE_DIR, 'data_real/train_files.txt')) -TEST_FILES = provider.getDataFiles(\ - os.path.join(BASE_DIR, 'data_real/test_files.txt')) -print(TRAIN_FILES) -print(TEST_FILES) - -def log_string(out_str): - LOG_FOUT.write(out_str+'\n') - LOG_FOUT.flush() - print(out_str) - - -# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。 -# tf.train.exponential_decay函数实现指数衰减学习率。 -def get_learning_rate(batch): - # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 - # 通过tf.train.exponential_decay函数实现指数衰减学习率。 - # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, - # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 - # tf.train.exponential_decay就是用来实现这个功能。 - # - # 步骤: - # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); - # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); - learning_rate = tf.compat.v1.train.exponential_decay( - BASE_LEARNING_RATE, # Base learning rate. - batch * BATCH_SIZE, # Current index into the dataset. - DECAY_STEP, # Decay step. - DECAY_RATE, # Decay rate. 
- staircase=True) - # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大) - learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! - return learning_rate - - -# 取得bn衰减 -# if the argument staircase is True, -# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function. -# 计算衰减的Batch Normalization 的 decay。 -def get_bn_decay(batch): - # 指数衰减法 - - # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 - # 通过tf.train.exponential_decay函数实现指数衰减学习率。 - # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, - # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 - # tf.train.exponential_decay就是用来实现这个功能。 - # - # 步骤: - # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); - # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); - bn_momentum = tf.compat.v1.train.exponential_decay( - BN_INIT_DECAY, - batch*BATCH_SIZE, - BN_DECAY_DECAY_STEP, - BN_DECAY_DECAY_RATE, - staircase=True) - # bn衰减0.99和1-衰减后的动量,取最小 - bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) - return bn_decay - - -# 初始运行的训练函数。 -# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练. -def train(): - # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图 - with tf.Graph().as_default(): - # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号, - # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。 - # with tf.device('/gpu:'+str(GPU_INDEX)): - with tf.device('/cpu:0'): - # 使用了pointne_cls.py的placeholder_inputs()方法。 - # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。 - # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型, - # 它只会分配必要的内存,用于传入外部数据。 - pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) - # 向指定好的对象中喂入数据:tf.placeholder() - # 取得占位符:是否在训练。 - is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) - print(is_training_pl) - - # Note the global_step=batch parameter to minimize. - # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains. 
- # 将 global_step = batch 参数最小化。 - # 这是在告诉优化器 在每次训练时 为你有用地增加'batch'参数。 - # 定义 batch = 0 - batch = tf.Variable(0) - # 取得bn衰减(自定义方法) - bn_decay = get_bn_decay(batch) - # 用来显示标量信息,一般在画loss,accuary时会用到这个函数。 - tf.compat.v1.summary.scalar('bn_decay', bn_decay) - - # Get model and loss - # 创建的数据处理网络为pred,调用 model\pointnet_cls 下的get_model()得到。由get_model()可知, - # pred的维度为B×N×40,40为分出的类别Channel数,对应40个分类标签。每个点的这40个值最大的一个的下标即为所预测的分类标签。 - # 首先使用共享参数的MLP对每个点进行特征提取,再使用MaxPooling在特征维进行池化操作, - # 使得网络对不同数量点的点云产生相同维度的特征向量,且输出对输入点的顺序产生不变性。 - # 在得到固定维度的特征向量之后,再使用一个MLP对其进行分类。 - pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) - # 调用pointnet_cls下的get_loss() - loss = MODEL.get_loss(pred, labels_pl, end_points) - tf.compat.v1.summary.scalar('loss', loss) - - # tf.argmax(pred, 2) 返回pred C 这个维度的最大值索引返回相同维度的bool值矩阵 - # tf.equal() 比较两个张量对应位置是否相等 - correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64)) - # 压缩求和,用于降维 - accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE) - tf.compat.v1.summary.scalar('accuracy', accuracy) - - # Get training operator - # 取得学习率(自定义方法),获得衰减后的学习率,以及选择优化器optimizer。 - learning_rate = get_learning_rate(batch) - tf.compat.v1.summary.scalar('learning_rate', learning_rate) - if OPTIMIZER == 'momentum': - optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) - elif OPTIMIZER == 'adam': - optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate) - # minimize的内部存在两个操作:(1)计算各个变量的梯度 (2)用梯度更新这些变量的值 - # (1)计算loss对指定val_list的梯度(导数),返回元组列表[(gradient,variable),…] - # (2)用计算得到的梯度来更新对应的变量(权重) - # 注意:在程序中global_step初始化为0,每次更新参数时,自动加1 - # 将minimize()分成两个步骤的原因:在某种情况下对梯度进行修正,防止梯度消失或者梯度爆炸 - train_op = optimizer.minimize(loss, global_step=batch) - - # Add ops to save and restore all the variables. 
- saver = tf.compat.v1.train.Saver() - - # Create a session - # 配置session 运行参数。 - # 创建sess的时候对sess进行参数配置 - config = tf.compat.v1.ConfigProto() - # =True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。 - config.gpu_options.allow_growth = True - # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行 - config.allow_soft_placement = True - # 在终端打印出各项操作是在哪个设备上运行的 - config.log_device_placement = False - # 创建 sess, 才能运行框架 - sess = tf.compat.v1.Session(config=config) - - # Add summary writers - #merged = tf.merge_all_summaries() - merged = tf.compat.v1.summary.merge_all() - train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'), - sess.graph) - test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test')) - - # Init variables - # 初始化参数,开始训练 - # train_one_epoch 函数用来训练一个epoch,eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的 - # accuracy和loss。每10个epoch保存1次模型。 - init = tf.compat.v1.global_variables_initializer() - # To fix the bug introduced in TF 0.12.1 as in - # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1 - #sess.run(init) - # 运行sess初始化所有的全局变量 - sess.run(init, {is_training_pl: True}) - - # ops 是一个字典,作为接口传入训练和评估 epoch 循环中。 - # pred 是数据处理网络模块;loss 是 损失函数;train_op 是优化器;batch 是当前的批次 - ops = {'pointclouds_pl': pointclouds_pl, - 'labels_pl': labels_pl, - 'is_training_pl': is_training_pl, - 'pred': pred, - 'loss': loss, - 'train_op': train_op, - 'merged': merged, - 'step': batch} - - for epoch in range(MAX_EPOCH): - # log(自定义方法) - log_string('**** EPOCH %03d ****' % (epoch)) - # 在同一个位置刷新输出 - sys.stdout.flush() - - # 训练一个批次(自定义方法) - # train_one_epoch 函数用来训练一个epoch - train_one_epoch(sess, ops, train_writer) - # 评估一个批次(自定义方法) - # eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的accuracy和loss - eval_one_epoch(sess, ops, test_writer) - - # Save the variables to disk. - # Save the variables to disk.每10个epoch保存1次模型 - if epoch % 10 == 0: - save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) - # log(自定义方法) - log_string("Model saved in file: %s" % save_path) - - -# provider.shuffle_data 函数随机打乱数据,返回打乱后的数据。 -# num_batches = file_size/BATCH_SIZE,计算在指定BATCH_SIZE下,训练1个epoch 需要几个mini-batch训练。 -def train_one_epoch(sess, ops, train_writer): - """ ops: dict mapping from string to tf ops """ - is_training = True - - # Shuffle train files - # 随机打乱训练数据 - train_file_idxs = np.arange(0, len(TRAIN_FILES)) - np.random.shuffle(train_file_idxs) - - for fn in range(len(TRAIN_FILES)): - log_string('----' + str(fn) + '-----') - current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]]) - current_data = current_data[:,0:NUM_POINT,:] - current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) - current_label = np.squeeze(current_label) - - file_size = current_data.shape[0] - num_batches = file_size // BATCH_SIZE - - total_correct = 0 - total_seen = 0 - loss_sum = 0 - - # 在一个epoch 中逐个mini-batch训练直至遍历完一遍训练集。计算总分类正确数total_correct和已遍历样本数 - # total_senn,总损失loss_sum. 
- for batch_idx in range(num_batches): - start_idx = batch_idx * BATCH_SIZE - end_idx = (batch_idx+1) * BATCH_SIZE - - # Augment batched point clouds by rotation and jittering - # 调用provider中rotate_point_cloud - rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :]) - jittered_data = provider.jitter_point_cloud(rotated_data) - feed_dict = {ops['pointclouds_pl']: jittered_data, - ops['labels_pl']: current_label[start_idx:end_idx], - ops['is_training_pl']: is_training,} - summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], - ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict) - # 训练,使用 tf 的 session 运行设计的框架,ops['pred'] 为整个网络,feed_dict 为网络提供的数据 - train_writer.add_summary(summary, step) - pred_val = np.argmax(pred_val, 1) - correct = np.sum(pred_val == current_label[start_idx:end_idx]) - total_correct += correct - total_seen += BATCH_SIZE - loss_sum += loss_val - - # 记录平均loss,以及平均accuracy。 - log_string('mean loss: %f' % (loss_sum / float(num_batches))) - log_string('accuracy: %f' % (total_correct / float(total_seen))) - - -def eval_one_epoch(sess, ops, test_writer): - """ ops: dict mapping from string to tf ops """ - is_training = False - total_correct = 0 - total_seen = 0 - loss_sum = 0 - total_seen_class = [0 for _ in range(NUM_CLASSES)] - total_correct_class = [0 for _ in range(NUM_CLASSES)] - - for fn in range(len(TEST_FILES)): - log_string('----' + str(fn) + '-----') - current_data, current_label = provider.loadDataFile(TEST_FILES[fn]) - current_data = current_data[:,0:NUM_POINT,:] - current_label = np.squeeze(current_label) - - file_size = current_data.shape[0] - num_batches = file_size // BATCH_SIZE - - for batch_idx in range(num_batches): - start_idx = batch_idx * BATCH_SIZE - end_idx = (batch_idx+1) * BATCH_SIZE - - feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], - ops['labels_pl']: current_label[start_idx:end_idx], - ops['is_training_pl']: is_training} - summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], - ops['loss'], ops['pred']], feed_dict=feed_dict) - pred_val = np.argmax(pred_val, 1) - correct = np.sum(pred_val == current_label[start_idx:end_idx]) - total_correct += correct - total_seen += BATCH_SIZE - loss_sum += (loss_val*BATCH_SIZE) - for i in range(start_idx, end_idx): - l = current_label[i] - total_seen_class[l] += 1 - total_correct_class[l] += (pred_val[i-start_idx] == l) - - log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) - log_string('eval accuracy: %f'% (total_correct / float(total_seen))) - log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) - - - -if __name__ == "__main__": - train() - LOG_FOUT.close() -- Gitee From 570627f3860d78fba4dd92d31b82461d3b523c93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:29:34 +0000 Subject: [PATCH 16/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/train.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/train.py | 452 --------------------- 1 file changed, 452 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/train.py diff --git a/TensorFlow2/built-in/keras_sample/train.py b/TensorFlow2/built-in/keras_sample/train.py deleted file mode 100644 index 4a6683530..000000000 --- 
a/TensorFlow2/built-in/keras_sample/train.py +++ /dev/null @@ -1,452 +0,0 @@ -# -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# import npu_device -# npu_device.open().as_default() - - -import argparse -# import math -# import h5py -import numpy as np -import tensorflow as tf -import socket -import importlib -import os -import sys -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -sys.path.append(BASE_DIR) -sys.path.append(os.path.join(BASE_DIR, 'models')) -sys.path.append(os.path.join(BASE_DIR, 'utils')) -import provider -# import tf_util -import time -import datetime -import ast -from npu_device.compat.v1.npu_init import * -import npu_device as npu -npu.compat.enable_v1() - -starttime = datetime.datetime.now() - -parser = argparse.ArgumentParser() -parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') -parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') -parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') -parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]') -parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]') -parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]') -parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') -parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') -parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') -parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') -parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]') -parser.add_argument('--data_path', type=str, default='', help='data path') -parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') -parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, - help='if or not over detection, default is False') -parser.add_argument('--data_dump_flag', 
dest='data_dump_flag', type=ast.literal_eval, - help='data dump flag, default is False') -parser.add_argument('--data_dump_step', default="10", - help='data dump step, default is 10') -parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') -parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') -parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') -parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') -parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, - help='use_mixlist flag, default is False') -parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, - help='fusion_off flag, default is False') -parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') -parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') -parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False') -FLAGS = parser.parse_args() - - -BATCH_SIZE = FLAGS.batch_size -NUM_POINT = FLAGS.num_point -MAX_EPOCH = FLAGS.max_epoch -BASE_LEARNING_RATE = FLAGS.learning_rate -GPU_INDEX = FLAGS.gpu -MOMENTUM = FLAGS.momentum -OPTIMIZER = FLAGS.optimizer -DECAY_STEP = FLAGS.decay_step -DECAY_RATE = FLAGS.decay_rate - -MODEL = importlib.import_module(FLAGS.model) # import network module -MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py') -LOG_DIR = FLAGS.log_dir -if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) -os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def -os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure -LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') -LOG_FOUT.write(str(FLAGS)+'\n') - -MAX_NUM_POINT = 2048 -NUM_CLASSES = 40 - -BN_INIT_DECAY = 0.5 -BN_DECAY_DECAY_RATE = 0.5 -BN_DECAY_DECAY_STEP = float(DECAY_STEP) -BN_DECAY_CLIP = 0.99 - -HOSTNAME = socket.gethostname() - -# ModelNet40 official train/test split -TRAIN_FILES = provider.getDataFiles( \ - os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/train_files.txt')) -TEST_FILES = provider.getDataFiles(\ - os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/test_files.txt')) - -def log_string(out_str): - LOG_FOUT.write(out_str+'\n') - LOG_FOUT.flush() - print(out_str) - - -# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。 -# tf.train.exponential_decay函数实现指数衰减学习率。 -def get_learning_rate(batch): - # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 - # 通过tf.train.exponential_decay函数实现指数衰减学习率。 - # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, - # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 - # tf.train.exponential_decay就是用来实现这个功能。 - # - # 步骤: - # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); - # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); - learning_rate = tf.compat.v1.train.exponential_decay( - BASE_LEARNING_RATE, # Base learning rate. - batch * BATCH_SIZE, # Current index into the dataset. - DECAY_STEP, # Decay step. - DECAY_RATE, # Decay rate. - staircase=True) - # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大) - learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! 
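-    # Closed form of the schedule above (staircase=True, global_step = batch * BATCH_SIZE):
-    #   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** ((batch * BATCH_SIZE) // DECAY_STEP), 0.00001)
-    # Note: tf.maximum clips the decayed rate from below, so 0.00001 is a lower
-    # bound on the learning rate, not an upper bound.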
- return learning_rate - - -# 取得bn衰减 -# if the argument staircase is True, -# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function. -# 计算衰减的Batch Normalization 的 decay。 -def get_bn_decay(batch): - # 指数衰减法 - - # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 - # 通过tf.train.exponential_decay函数实现指数衰减学习率。 - # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, - # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 - # tf.train.exponential_decay就是用来实现这个功能。 - # - # 步骤: - # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); - # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); - bn_momentum = tf.compat.v1.train.exponential_decay( - BN_INIT_DECAY, - batch*BATCH_SIZE, - BN_DECAY_DECAY_STEP, - BN_DECAY_DECAY_RATE, - staircase=True) - # bn衰减0.99和1-衰减后的动量,取最小 - bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) - return bn_decay - - -# 初始运行的训练函数。 -# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练. -def train(): - # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图 - with tf.Graph().as_default(): - # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号, - # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。 - # with tf.device('/gpu:'+str(GPU_INDEX)): - with tf.device('/gpu:0'): - # 使用了pointne_cls.py的placeholder_inputs()方法。 - # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。 - # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型, - # 它只会分配必要的内存,用于传入外部数据。 - pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) - # 向指定好的对象中喂入数据:tf.placeholder() - # 取得占位符:是否在训练。 - is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) - print(is_training_pl) - - # Note the global_step=batch parameter to minimize. - # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains. 
- # 将 global_step = batch 参数最小化。 - # 这是在告诉优化器 在每次训练时 为你有用地增加'batch'参数。 - # 定义 batch = 0 - batch = tf.Variable(0) - # 取得bn衰减(自定义方法) - bn_decay = get_bn_decay(batch) - # 用来显示标量信息,一般在画loss,accuary时会用到这个函数。 - tf.compat.v1.summary.scalar('bn_decay', bn_decay) - - # Get model and loss - # 创建的数据处理网络为pred,调用 model\pointnet_cls 下的get_model()得到。由get_model()可知, - # pred的维度为B×N×40,40为分出的类别Channel数,对应40个分类标签。每个点的这40个值最大的一个的下标即为所预测的分类标签。 - # 首先使用共享参数的MLP对每个点进行特征提取,再使用MaxPooling在特征维进行池化操作, - # 使得网络对不同数量点的点云产生相同维度的特征向量,且输出对输入点的顺序产生不变性。 - # 在得到固定维度的特征向量之后,再使用一个MLP对其进行分类。 - pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) - # 调用pointnet_cls下的get_loss() - loss = MODEL.get_loss(pred, labels_pl, end_points) - tf.compat.v1.summary.scalar('loss', loss) - - # tf.argmax(pred, 2) 返回pred C 这个维度的最大值索引返回相同维度的bool值矩阵 - # tf.equal() 比较两个张量对应位置是否相等 - correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64)) - # 压缩求和,用于降维 - accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE) - tf.compat.v1.summary.scalar('accuracy', accuracy) - - # Get training operator - # 取得学习率(自定义方法),获得衰减后的学习率,以及选择优化器optimizer。 - learning_rate = get_learning_rate(batch) - tf.compat.v1.summary.scalar('learning_rate', learning_rate) - if OPTIMIZER == 'momentum': - optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) - elif OPTIMIZER == 'adam': - optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate) - # minimize的内部存在两个操作:(1)计算各个变量的梯度 (2)用梯度更新这些变量的值 - # (1)计算loss对指定val_list的梯度(导数),返回元组列表[(gradient,variable),…] - # (2)用计算得到的梯度来更新对应的变量(权重) - # 注意:在程序中global_step初始化为0,每次更新参数时,自动加1 - # 将minimize()分成两个步骤的原因:在某种情况下对梯度进行修正,防止梯度消失或者梯度爆炸 - train_op = optimizer.minimize(loss, global_step=batch) - - # Add ops to save and restore all the variables. 
- saver = tf.compat.v1.train.Saver() - - # Create a session - # 配置session 运行参数。 - # 创建sess的时候对sess进行参数配置 - config = tf.compat.v1.ConfigProto() - custom_op = config.graph_options.rewrite_options.custom_optimizers.add() - custom_op.name = 'NpuOptimizer' - custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode) - if FLAGS.data_dump_flag: - custom_op.parameter_map["enable_dump"].b = True - custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path) - custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step) - custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all") - if FLAGS.over_dump: - custom_op.parameter_map["enable_dump_debug"].b = True - custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path) - custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all") - if FLAGS.profiling: - custom_op.parameter_map["precision_mode"].b = True - profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \ - "training_trace":"on", \ - "task_trace":"on", \ - "aicpu":"on", \ - "aic_metrics":"PipeUtilization",\ - "fp_point":"", \ - "bp_point":""}' - custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options) - if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision': - custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file) - if FLAGS.fusion_off_flag: - custom_op.parameter_map["sfusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file) - if FLAGS.auto_tune: - custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA") - config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 必须显式关闭 - config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF # 必须显式关闭 - - # =True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。 - config.gpu_options.allow_growth = True - # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行 - config.allow_soft_placement = True - # 在终端打印出各项操作是在哪个设备上运行的 - config.log_device_placement = False - # 创建 sess, 才能运行框架 - sess = tf.compat.v1.Session(config=config) - - # Add summary writers - #merged = tf.merge_all_summaries() - merged = tf.compat.v1.summary.merge_all() - train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'), - sess.graph) - test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test')) - - # Init variables - # 初始化参数,开始训练 - # train_one_epoch 函数用来训练一个epoch,eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的 - # accuracy和loss。每10个epoch保存1次模型。 - init = tf.compat.v1.global_variables_initializer() - # To fix the bug introduced in TF 0.12.1 as in - # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1 - #sess.run(init) - # 运行sess初始化所有的全局变量 - sess.run(init, {is_training_pl: True}) - - # ops 是一个字典,作为接口传入训练和评估 epoch 循环中。 - # pred 是数据处理网络模块;loss 是 损失函数;train_op 是优化器;batch 是当前的批次 - ops = {'pointclouds_pl': pointclouds_pl, - 'labels_pl': labels_pl, - 'is_training_pl': is_training_pl, - 'pred': pred, - 'loss': loss, - 'train_op': train_op, - 'merged': merged, - 'step': batch} - - for epoch in range(MAX_EPOCH): - # log(自定义方法) - log_string('**** EPOCH %03d ****' % (epoch)) - # 在同一个位置刷新输出 - sys.stdout.flush() - - # 训练一个批次(自定义方法) - # train_one_epoch 函数用来训练一个epoch - train_one_epoch(sess, ops, train_writer) - # 评估一个批次(自定义方法) - # eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的accuracy和loss - eval_one_epoch(sess, ops, test_writer) - - # Save the variables to disk. 
- # Save the variables to disk.每10个epoch保存1次模型 - if epoch % 10 == 0: - save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) - # log(自定义方法) - log_string("Model saved in file: %s" % save_path) - - -# provider.shuffle_data 函数随机打乱数据,返回打乱后的数据。 -# num_batches = file_size/BATCH_SIZE,计算在指定BATCH_SIZE下,训练1个epoch 需要几个mini-batch训练。 -def train_one_epoch(sess, ops, train_writer): - """ ops: dict mapping from string to tf ops """ - is_training = True - - # Shuffle train files - # 随机打乱训练数据 - train_file_idxs = np.arange(0, len(TRAIN_FILES)) - np.random.shuffle(train_file_idxs) - - L = [] - for fn in range(len(TRAIN_FILES)): - log_string('----' + str(fn) + '-----') - current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TRAIN_FILES[train_file_idxs[fn]])) - current_data = current_data[:,0:NUM_POINT,:] - current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) - current_label = np.squeeze(current_label) - - file_size = current_data.shape[0] - num_batches = file_size // BATCH_SIZE - - total_correct = 0 - total_seen = 0 - loss_sum = 0 - endtime = datetime.datetime.now() - if fn == 0: - TOTLE_TIME = (endtime - starttime).seconds - L.append(TOTLE_TIME) - - # 在一个epoch 中逐个mini-batch训练直至遍历完一遍训练集。计算总分类正确数total_correct和已遍历样本数 - - # total_senn,总损失loss_sum. - for batch_idx in range(num_batches): - start_time = time.time() - start_idx = batch_idx * BATCH_SIZE - end_idx = (batch_idx+1) * BATCH_SIZE - - # Augment batched point clouds by rotation and jittering - # 调用provider中rotate_point_cloud - rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :]) - jittered_data = provider.jitter_point_cloud(rotated_data) - feed_dict = {ops['pointclouds_pl']: jittered_data, - ops['labels_pl']: current_label[start_idx:end_idx], - ops['is_training_pl']: is_training,} - summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], - ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict) - cost_time = time.time() - start_time - FPS = BATCH_SIZE / cost_time - # 训练,使用 tf 的 session 运行设计的框架,ops['pred'] 为整个网络,feed_dict 为网络提供的数据 - train_writer.add_summary(summary, step) - pred_val = np.argmax(pred_val, 1) - correct = np.sum(pred_val == current_label[start_idx:end_idx]) - total_correct += correct - total_seen += BATCH_SIZE - loss_sum += loss_val - - # 记录平均loss,以及平均accuracy。 - log_string('TOTLE_TIME : %.2f' % (float(L[0]))) - log_string('FPS : %.2f' % (float(FPS))) - log_string('mean loss: %f' % (loss_sum / float(num_batches))) - log_string('accuracy: %f' % (total_correct / float(total_seen))) - - -def eval_one_epoch(sess, ops, test_writer): - """ ops: dict mapping from string to tf ops """ - is_training = False - total_correct = 0 - total_seen = 0 - loss_sum = 0 - total_seen_class = [0 for _ in range(NUM_CLASSES)] - total_correct_class = [0 for _ in range(NUM_CLASSES)] - - for fn in range(len(TEST_FILES)): - log_string('----' + str(fn) + '-----') - current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TEST_FILES[fn])) - current_data = current_data[:,0:NUM_POINT,:] - current_label = np.squeeze(current_label) - - file_size = current_data.shape[0] - num_batches = file_size // BATCH_SIZE - - for batch_idx in range(num_batches): - start_idx = batch_idx * BATCH_SIZE - end_idx = (batch_idx+1) * BATCH_SIZE - - feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], - ops['labels_pl']: current_label[start_idx:end_idx], - ops['is_training_pl']: is_training} - summary, step, 
loss_val, pred_val = sess.run([ops['merged'], ops['step'], - ops['loss'], ops['pred']], feed_dict=feed_dict) - pred_val = np.argmax(pred_val, 1) - correct = np.sum(pred_val == current_label[start_idx:end_idx]) - total_correct += correct - total_seen += BATCH_SIZE - loss_sum += (loss_val*BATCH_SIZE) - for i in range(start_idx, end_idx): - l = current_label[i] - total_seen_class[l] += 1 - total_correct_class[l] += (pred_val[i-start_idx] == l) - - log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) - log_string('eval accuracy: %f'% (total_correct / float(total_seen))) - log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) - - - -if __name__ == "__main__": - train() - LOG_FOUT.close() -- Gitee From aaa9859708977267b8eb7672222083c111e8b26f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:29:42 +0000 Subject: [PATCH 17/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/requirements.txt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- TensorFlow2/built-in/keras_sample/requirements.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/requirements.txt diff --git a/TensorFlow2/built-in/keras_sample/requirements.txt b/TensorFlow2/built-in/keras_sample/requirements.txt deleted file mode 100644 index e69de29bb..000000000 -- Gitee From 701f6634e01f4a8445cea4c4be9c38063abc1acd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:29:49 +0000 Subject: [PATCH 18/54] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/README=5FBAK.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../built-in/keras_sample/README_BAK.md | 77 ------------------- 1 file changed, 77 deletions(-) delete mode 100644 TensorFlow2/built-in/keras_sample/README_BAK.md diff --git a/TensorFlow2/built-in/keras_sample/README_BAK.md b/TensorFlow2/built-in/keras_sample/README_BAK.md deleted file mode 100644 index 6d7185b09..000000000 --- a/TensorFlow2/built-in/keras_sample/README_BAK.md +++ /dev/null @@ -1,77 +0,0 @@ -## PointNet: *Deep Learning on Point Sets for 3D Classification and Segmentation* -Created by Charles R. Qi, Hao Su, Kaichun Mo, Leonidas J. Guibas from Stanford University. - -![prediction example](https://github.com/charlesq34/pointnet/blob/master/doc/teaser.png) - -### Introduction -This work is based on our [arXiv tech report](https://arxiv.org/abs/1612.00593), which is going to appear in CVPR 2017. We proposed a novel deep net architecture for point clouds (as unordered point sets). You can also check our [project webpage](http://stanford.edu/~rqi/pointnet) for a deeper introduction. - -Point cloud is an important type of geometric data structure. Due to its irregular format, most researchers transform such data to regular 3D voxel grids or collections of images. This, however, renders data unnecessarily voluminous and causes issues. In this paper, we design a novel type of neural network that directly consumes point clouds, which well respects the permutation invariance of points in the input. 
Our network, named PointNet, provides a unified architecture for applications ranging from object classification, part segmentation, to scene semantic parsing. Though simple, PointNet is highly efficient and effective. - -In this repository, we release code and data for training a PointNet classification network on point clouds sampled from 3D shapes, as well as for training a part segmentation network on ShapeNet Part dataset. - -### Citation -If you find our work useful in your research, please consider citing: - - @article{qi2016pointnet, - title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation}, - author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J}, - journal={arXiv preprint arXiv:1612.00593}, - year={2016} - } - -### Installation - -Install TensorFlow. You may also need to install h5py. The code has been tested with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04. - -If you are using PyTorch, you can find a third-party pytorch implementation here. - -To install h5py for Python: -```bash -sudo apt-get install libhdf5-dev -sudo pip install h5py -``` - -### Usage -To train a model to classify point clouds sampled from 3D shapes: - - python train.py - -Log files and network parameters will be saved to `log` folder in default. Point clouds of ModelNet40 models in HDF5 files will be automatically downloaded (416MB) to the data folder. Each point cloud contains 2048 points uniformly sampled from a shape surface. Each cloud is zero-mean and normalized into an unit sphere. There are also text files in `data/modelnet40_ply_hdf5_2048` specifying the ids of shapes in h5 files. - -To see HELP for the training script: - - python train.py -h - -We can use TensorBoard to view the network architecture and monitor the training progress. - - tensorboard --logdir log - -After the above training, we can evaluate the model and output some visualizations of the error cases. - - python evaluate.py --visu - -Point clouds that are wrongly classified will be saved to `dump` folder in default. We visualize the point cloud by rendering it into three-view images. - -If you'd like to prepare your own data, you can refer to some helper functions in `utils/data_prep_util.py` for saving and loading HDF5 files. - -### Part Segmentation -To train a model for object part segmentation, firstly download the data: - - cd part_seg - sh download_data.sh - -The downloading script will download ShapeNetPart dataset (around 1.08GB) and our prepared HDF5 files (around 346MB). - -Then you can run `train.py` and `test.py` in the `part_seg` folder for training and testing (computing mIoU for evaluation). - -### License -Our code is released under MIT License (see LICENSE file for details). - -### Selected Projects that Use PointNet - -* PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space by Qi et al. (NIPS 2017) A hierarchical feature learning framework on point clouds. The PointNet++ architecture applies PointNet recursively on a nested partitioning of the input point set. It also proposes novel layers for point clouds with non-uniform densities. -* Exploring Spatial Context for 3D Semantic Segmentation of Point Clouds by Engelmann et al. (ICCV 2017 workshop). This work extends PointNet for large-scale scene segmentation. -* PCPNET: Learning Local Shape Properties from Raw Point Clouds by Guerrero et al. (arXiv). The work adapts PointNet for local geometric properties (e.g. normal and curvature) estimation in noisy point clouds. 
-* VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection by Zhou et al. from Apple (arXiv) This work studies 3D object detection using LiDAR point clouds. It splits space into voxels, use PointNet to learn local voxel features and then use 3D CNN for region proposal, object classification and 3D bounding box estimation. -* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al. (arXiv) A novel framework for 3D object detection with RGB-D data. The method proposed has achieved first place on KITTI 3D object detection benchmark on all categories (last checked on 11/30/2017). -- Gitee From da4c030b682710472c9ae7f3c79c5bd8bc53de06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:29:59 +0000 Subject: [PATCH 19/54] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20PointNet=5FID2913=5F?= =?UTF-8?q?for=5FTensorFlow2.X?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep new file mode 100644 index 000000000..e69de29bb -- Gitee From 9ad695cf55f6c68dbefebcdce45c1b3482fb037a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:30:20 +0000 Subject: [PATCH 20/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../.gitignore | 2 + .../PointNet_ID2913_for_TensorFlow2.X/LICENSE | 51 ++ .../README.md | 233 +++++++++ .../README_BAK.md | 77 +++ .../evaluate.py | 199 ++++++++ .../modelzoo_level.txt | 3 + .../provider.py | 165 +++++++ .../requirements.txt | 0 .../train.py | 452 ++++++++++++++++++ .../train_real.py | 381 +++++++++++++++ 10 files changed, 1563 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore new file mode 100644 index 000000000..8efb80c9a --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.gitignore @@ -0,0 
+1,2 @@ +/data/* +/log/* diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..e93be0a6b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/LICENSE @@ -0,0 +1,51 @@ +PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation. + +Copyright (c) 2017, Geometric Computation Group of Stanford University + +The MIT License (MIT) + +Copyright (c) 2017 Charles R. Qi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +PointNet:针对3D分类和分割的点集深度学习。 + +斯坦福大学几何计算小组(c)2017版权所有 + +MIT许可证(MIT) + +版权所有(c)2017 Charles R.Qi + +特此授予获得副本的任何人免费的许可 +软件和相关文档文件(以下简称“软件”)的交易 +在软件中不受限制,包括但不限于权利 +使用,复制,修改,合并,发布,分发,再许可和/或出售 +本软件的副本,并允许本软件所针对的人 +具备以下条件: + +以上版权声明和此许可声明应包含在所有 +复制或实质性的软件部分。 + +本软件按“原样”提供,不提供任何形式的明示或明示保证。 +暗示(包括但不限于适销性的保证), +适用于特定目的和非侵权。在任何情况下都不会 +作者或版权持有人对任何索赔,损害或其他责任 +无论是由于合同,侵权或其他形式的诉讼而引起的责任, +与软件或软件的使用或其他交易无关或与之有关 +软件。 \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..2e27ca0f6 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README.md @@ -0,0 +1,233 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +

基本信息

+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Point Cloud Classification/Segmentation**
+
+**版本(Version):1.1**
+
+**修改时间(Modified):2022.04.11**
+
+**大小(Size):43M**
+
+**框架(Framework):TensorFlow_2.6.2**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Official**
+
+**描述(Description):基于TensorFlow2.X框架的3D点云分类和分割网络(PointNet)训练代码**
+
+

概述

+ +## 简述 + +点云(point cloud)是一种非常重要的几何数据结构。由于点云的无规律性(irregular format),大部分研究者将点云转换为规律的3D体素网格(3D voxel grids)或者一组不同视角的2D图像。这种转换数据的方式,增加了数据的规模,同时也会带来一系列问题。PointNet是一种可以直接处理点云的神经网络,并且考虑了输入点云序列不变性的特征。PointNet提供了统一的应用架构,可以用于分类(classification),块分割(part segmentation),语义理解(semantic parsing)。尽管网络很简单,但是非常有效。从实验结果上看,它超越了经典的方法,至少也达到同样的水平。理论上,我们进行了分析,包括网络学习了什么,以及当数据被一定程度的干扰后,网络为什么能保持稳定。 + + + - 参考论文: + + https://arxiv.org/abs/1612.00593(https://arxiv.org/abs/1612.00593) + + - 参考实现: + https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py(https://github.com/keras-team/keras-io/blob/master/examples/vision/pointnet.py) + + + - 适配昇腾 AI 处理器的实现: + skip + + - 通过Git获取对应commit\_id的代码方法如下: + ``` + git clone {repository_url} # 克隆仓库的代码 + cd {repository_name} # 切换到模型的代码仓目录 + git checkout {branch} # 切换到对应分支 + git reset --hard {commit_id} # 代码设置到对应的commit_id + cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换 + ``` + + + + +## 默认配置 + + +- 网络结构 + - 设计最大池化层(对称函数),用于聚合所有点的特征信息 + - 计算全局点云特征向量后,通过将全局特征与每个点特征连接起来,将全局特征反馈给每个点特征。然后我们在合并的点特征的基础上提取新的每点特征——这时,每点特征都能识别局部和全局信息 + - 通过一个小网络(T-net)来预测一个仿射变换矩阵,并直接将这个变换应用到输入点的坐标上。小网络与大网络相似,由点独立特征提取、最大池化和全连接层等基本模块组成。 + +- 训练超参(单卡): + - Batch size: 32 + - learning_rate:0.0015 + - num_point:2048 + - Train epoch: 250 + + +## 支持特性 + +| 特性列表 | 是否支持 | +|-------|------| +| 分布式训练 | 否 | +| 混合精度 | 是 | +| 数据并行 | 否 | + +## 混合精度训练 + +昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。 + +## 开启混合精度 +相关代码示例。 + +``` + config_proto = tf.ConfigProto(allow_soft_placement=True) + custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add() + custom_op.name = 'NpuOptimizer' + custom_op.parameter_map["use_off_line"].b = True + custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") + config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF + session_config = npu_config_proto(config_proto=config_proto) +``` + +
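+
+A minimal TF2/Keras sketch of the 网络结构 bullets above (a shared per-point MLP, max-pooling as the
+symmetric aggregation function, and a T-Net that predicts an affine transform applied to the input
+points). The layer widths and initializers here are simplified assumptions for illustration, in the
+spirit of the Keras reference implementation linked above, not the exact configuration of this
+repository's models/ scripts:
+
+```
+import numpy as np
+import tensorflow as tf
+from tensorflow.keras import layers
+
+def tnet(inputs, k):
+    """Predict a k x k transform and apply it to the input point features."""
+    x = layers.Conv1D(64, 1, activation="relu")(inputs)   # shared MLP (kernel size 1)
+    x = layers.Conv1D(128, 1, activation="relu")(x)
+    x = layers.GlobalMaxPooling1D()(x)                    # symmetric over points
+    x = layers.Dense(256, activation="relu")(x)
+    x = layers.Dense(k * k, kernel_initializer="zeros",
+                     bias_initializer=tf.keras.initializers.Constant(
+                         np.eye(k).flatten()))(x)         # start near the identity
+    transform = layers.Reshape((k, k))(x)
+    return tf.matmul(inputs, transform)                   # apply transform per cloud
+
+def build_pointnet_cls(num_points=2048, num_classes=40):
+    inputs = tf.keras.Input(shape=(num_points, 3))
+    x = tnet(inputs, 3)                                   # input transform (T-Net)
+    x = layers.Conv1D(64, 1, activation="relu")(x)        # per-point features
+    x = layers.Conv1D(1024, 1, activation="relu")(x)
+    x = layers.GlobalMaxPooling1D()(x)                    # order-invariant global feature
+    x = layers.Dense(512, activation="relu")(x)
+    outputs = layers.Dense(num_classes, activation="softmax")(x)
+    return tf.keras.Model(inputs, outputs)
+
+build_pointnet_cls().summary()
+```
+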

训练环境准备

+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt位于模型的根目录。
+
+

快速上手

+
+## 数据集准备
+
+1. 模型训练使用modelnet40_ply_hdf5_2048数据集,即从ModelNet40三维模型表面均匀采样得到的点云数据(HDF5文件格式)。每个点云包含2048个采样点,且已做零均值化并归一化到单位球内。
+2. 安装h5py(原始参考实现在Ubuntu 14.04、Python 2.7、TensorFlow 1.0.1、CUDA 8.0、cuDNN 5.1环境下测试;本工程基于TensorFlow_2.6.2)。
+```
+sudo apt-get install libhdf5-dev
+sudo pip install h5py
+```
+3. 默认情况下,日志文件和网络参数将保存到log文件夹中。HDF5格式的ModelNet40点云数据(约416MB)需提前准备并通过--data_path指定(本工程provider.py中的自动下载逻辑已被注释)。
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+    1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+       环境变量配置信息参见:
+
+          [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. 单卡训练
+
+       2.1 设置单卡训练参数(脚本位于PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
+
+            ```
+            batch_size=32
+            #训练epoch数
+            train_epochs=250
+            #学习率
+            learning_rate=0.0015
+            ```
+
+       2.2 单卡训练指令(PointNet_ID2913_for_TensorFlow2.X/test)
+
+            ```
+            在终端中运行export ASCEND_DEVICE_ID=0(0~7)以指定单卡训练使用的卡
+            bash train_full_1p.sh --data_path=xx
+            数据集应为h5格式,配置data_path时需指定为data这一层,例:--data_path=/home/data
+            ├─data
+               ├─ply_data_test0.h5*
+               ├─ply_data_test_0_id2file.json*
+               ├─ply_data_test1.h5*
+               ├─ply_data_test_1_id2file.json*
+               ├─ply_data_train0.h5*
+               ├─ply_data_train_0_id2file.json*
+               ├─ply_data_train1.h5*
+               ├─ply_data_train_1_id2file.json*
+               ├─ply_data_train2.h5*
+               ├─ply_data_train_2_id2file.json*
+               ├─ply_data_train3.h5*
+               ├─ply_data_train_3_id2file.json*
+               ├─ply_data_train4.h5*
+               ├─ply_data_train_4_id2file.json*
+               ├─shape_names.txt*
+               ├─test_files.txt*
+               ├─train_files.txt*
+            ```
+
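+
+To sanity-check a prepared shard before training (provider.py reads a 'data' and a 'label' dataset
+from each file), a few lines of h5py are enough; the path below is an example matching the layout
+shown above:
+
+```
+import h5py
+
+with h5py.File("/home/data/ply_data_train0.h5", "r") as f:
+    data = f["data"][:]     # point clouds, expected shape (N, 2048, 3)
+    label = f["label"][:]   # integer class ids in [0, 40)
+    print(data.shape, data.dtype)
+    print(label.shape, int(label.min()), int(label.max()))
+```
+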

迁移学习指导

+ +- 数据集准备。 + + 1. 获取数据。 + 请参见“快速上手”中的数据集准备 + +- 模型训练 + + 请参考“快速上手”章节 + +
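+
+When adapting a trained checkpoint rather than training from scratch, it helps to first confirm the
+variable names train.py saved before wiring up tf.compat.v1.train.Saver for restore (evaluate.py
+shows the restore flow). A small sketch; log/model.ckpt assumes the default --log_dir:
+
+```
+import tensorflow as tf
+
+CKPT = "log/model.ckpt"  # checkpoint written by train.py every 10 epochs
+
+# List variable names/shapes in the checkpoint; a rebuilt graph must expose
+# matching variables for Saver.restore to succeed.
+for name, shape in tf.train.list_variables(CKPT):
+    print(name, shape)
+```
+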

高级参考

+
+## 脚本和示例代码
+
+    ├── README.md                                //说明文档
+    ├── requirements.txt                         //依赖
+    ├── modelzoo_level.txt                       //状态文件
+    ├── provider.py                              //数据集处理脚本
+    ├── train.py                                 //网络训练脚本
+    ├── models                                   //网络结构定义脚本
+    |    |—— pointnet_cls.py
+    |    |—— pointnet_cls_basic.py
+    |    |—— pointnet_seg.py
+    |    |—— transform_nets.py
+    ├── test
+    |    |—— train_full_1p.sh                    //单卡全量训练脚本
+    |    |—— train_performance_1p.sh             //单卡性能训练脚本
+    ...
+
+## 脚本参数
+
+```
+data_path             数据集路径
+batch_size            训练batch size,默认32
+learning_rate         初始学习率,默认0.001
+max_epoch             最大训练epoch数,默认250
+num_point             每个点云从形状表面均匀采样的点数,默认1024
+precision_mode        精度模式,默认"allow_mix_precision"
+over_dump             是否开启溢出检测,默认False
+over_dump_path        溢出检测数据保存路径
+data_dump_flag        是否开启数据dump,默认False
+data_dump_step        数据dump步数,默认10
+data_dump_path        dump数据保存路径
+profiling             是否开启profiling性能调试,默认False
+profiling_dump_path   profiling数据保存路径
+use_mixlist           是否使用混合精度黑白名单,默认False
+mixlist_file          黑白名单文件名,默认ops_info.json
+fusion_off_flag       是否关闭算子融合,默认False
+fusion_off_file       融合开关配置文件名,默认fusion_switch.cfg
+auto_tune             是否开启auto_tune,默认False
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..6d7185b09
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,77 @@
+## PointNet: *Deep Learning on Point Sets for 3D Classification and Segmentation*
+Created by Charles R. Qi, Hao Su, Kaichun Mo, Leonidas J. Guibas from Stanford University.
+
+![prediction example](https://github.com/charlesq34/pointnet/blob/master/doc/teaser.png)
+
+### Introduction
+This work is based on our [arXiv tech report](https://arxiv.org/abs/1612.00593), which is going to appear in CVPR 2017. We proposed a novel deep net architecture for point clouds (as unordered point sets). You can also check our [project webpage](http://stanford.edu/~rqi/pointnet) for a deeper introduction.
+
+Point cloud is an important type of geometric data structure. Due to its irregular format, most researchers transform such data to regular 3D voxel grids or collections of images. This, however, renders data unnecessarily voluminous and causes issues. In this paper, we design a novel type of neural network that directly consumes point clouds, which well respects the permutation invariance of points in the input. Our network, named PointNet, provides a unified architecture for applications ranging from object classification, part segmentation, to scene semantic parsing. Though simple, PointNet is highly efficient and effective.
+
+In this repository, we release code and data for training a PointNet classification network on point clouds sampled from 3D shapes, as well as for training a part segmentation network on ShapeNet Part dataset.
+ +### Citation +If you find our work useful in your research, please consider citing: + + @article{qi2016pointnet, + title={PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation}, + author={Qi, Charles R and Su, Hao and Mo, Kaichun and Guibas, Leonidas J}, + journal={arXiv preprint arXiv:1612.00593}, + year={2016} + } + +### Installation + +Install TensorFlow. You may also need to install h5py. The code has been tested with Python 2.7, TensorFlow 1.0.1, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04. + +If you are using PyTorch, you can find a third-party pytorch implementation here. + +To install h5py for Python: +```bash +sudo apt-get install libhdf5-dev +sudo pip install h5py +``` + +### Usage +To train a model to classify point clouds sampled from 3D shapes: + + python train.py + +Log files and network parameters will be saved to `log` folder in default. Point clouds of ModelNet40 models in HDF5 files will be automatically downloaded (416MB) to the data folder. Each point cloud contains 2048 points uniformly sampled from a shape surface. Each cloud is zero-mean and normalized into an unit sphere. There are also text files in `data/modelnet40_ply_hdf5_2048` specifying the ids of shapes in h5 files. + +To see HELP for the training script: + + python train.py -h + +We can use TensorBoard to view the network architecture and monitor the training progress. + + tensorboard --logdir log + +After the above training, we can evaluate the model and output some visualizations of the error cases. + + python evaluate.py --visu + +Point clouds that are wrongly classified will be saved to `dump` folder in default. We visualize the point cloud by rendering it into three-view images. + +If you'd like to prepare your own data, you can refer to some helper functions in `utils/data_prep_util.py` for saving and loading HDF5 files. + +### Part Segmentation +To train a model for object part segmentation, firstly download the data: + + cd part_seg + sh download_data.sh + +The downloading script will download ShapeNetPart dataset (around 1.08GB) and our prepared HDF5 files (around 346MB). + +Then you can run `train.py` and `test.py` in the `part_seg` folder for training and testing (computing mIoU for evaluation). + +### License +Our code is released under MIT License (see LICENSE file for details). + +### Selected Projects that Use PointNet + +* PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space by Qi et al. (NIPS 2017) A hierarchical feature learning framework on point clouds. The PointNet++ architecture applies PointNet recursively on a nested partitioning of the input point set. It also proposes novel layers for point clouds with non-uniform densities. +* Exploring Spatial Context for 3D Semantic Segmentation of Point Clouds by Engelmann et al. (ICCV 2017 workshop). This work extends PointNet for large-scale scene segmentation. +* PCPNET: Learning Local Shape Properties from Raw Point Clouds by Guerrero et al. (arXiv). The work adapts PointNet for local geometric properties (e.g. normal and curvature) estimation in noisy point clouds. +* VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection by Zhou et al. from Apple (arXiv) This work studies 3D object detection using LiDAR point clouds. It splits space into voxels, use PointNet to learn local voxel features and then use 3D CNN for region proposal, object classification and 3D bounding box estimation. +* Frustum PointNets for 3D Object Detection from RGB-D Data by Qi et al. 
(arXiv) A novel framework for 3D object detection with RGB-D data. The method proposed has achieved first place on KITTI 3D object detection benchmark on all categories (last checked on 11/30/2017). diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py new file mode 100644 index 000000000..749f8c7f8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/evaluate.py @@ -0,0 +1,199 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
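+#
+# evaluate.py restores the classification checkpoint written by train.py and
+# evaluates it on the ModelNet40 test split. With num_votes > 1, each batch is
+# evaluated under num_votes rotations about the up axis and the per-class
+# scores are summed before the argmax (rotation voting); with --visu,
+# misclassified clouds are rendered to three-view images under DUMP_DIR.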
+# +import tensorflow as tf +import numpy as np +import argparse +import socket +import importlib +import time +import os +import scipy.misc +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, 'models')) +sys.path.append(os.path.join(BASE_DIR, 'utils')) +import provider +import pc_util + + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') +parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 1]') +parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]') +parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]') +parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]') +parser.add_argument('--visu', action='store_true', help='Whether to dump image for error case [default: False]') +FLAGS = parser.parse_args() + + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MODEL_PATH = FLAGS.model_path +GPU_INDEX = FLAGS.gpu +MODEL = importlib.import_module(FLAGS.model) # import network module +DUMP_DIR = FLAGS.dump_dir +if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR) +LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +NUM_CLASSES = 40 +SHAPE_NAMES = [line.rstrip() for line in \ + open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] + +HOSTNAME = socket.gethostname() + +# ModelNet40 official train/test split +TRAIN_FILES = provider.getDataFiles( \ + os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt')) +TEST_FILES = provider.getDataFiles(\ + os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt')) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + +def evaluate(num_votes): + is_training = False + + with tf.device('/cpu:0'): + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + + # simple model + pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl) + loss = MODEL.get_loss(pred, labels_pl, end_points) + + # Add ops to save and restore all the variables. + saver = tf.compat.v1.train.Saver() + + # Create a session + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = True + sess = tf.compat.v1.Session(config=config) + + # Restore variables from disk. 
+ saver.restore(sess, MODEL_PATH) + log_string("Model restored.") + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': loss} + + eval_one_epoch(sess, ops, num_votes) + + +def eval_one_epoch(sess, ops, num_votes=1, topk=1): + error_cnt = 0 + is_training = False + total_correct = 0 + total_seen = 0 + loss_sum = 0 + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w') + for fn in range(len(TEST_FILES)): + log_string('----'+str(fn)+'----') + current_data, current_label = provider.loadDataFile(TEST_FILES[fn]) + current_data = current_data[:,0:NUM_POINT,:] + current_label = np.squeeze(current_label) + print(current_data.shape) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + print(file_size) + + for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + cur_batch_size = end_idx - start_idx + + # Aggregating BEG + batch_loss_sum = 0 # sum of losses for the batch + batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes + batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes + for vote_idx in range(num_votes): + rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :], + vote_idx/float(num_votes) * np.pi * 2) + feed_dict = {ops['pointclouds_pl']: rotated_data, + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training} + loss_val, pred_val = sess.run([ops['loss'], ops['pred']], + feed_dict=feed_dict) + batch_pred_sum += pred_val + batch_pred_val = np.argmax(pred_val, 1) + for el_idx in range(cur_batch_size): + batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1 + batch_loss_sum += (loss_val * cur_batch_size / float(num_votes)) + # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1] + # pred_val = np.argmax(batch_pred_classes, 1) + pred_val = np.argmax(batch_pred_sum, 1) + # Aggregating END + + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + # correct = np.sum(pred_val_topk[:,0:topk] == label_val) + total_correct += correct + total_seen += cur_batch_size + loss_sum += batch_loss_sum + + for i in range(start_idx, end_idx): + l = current_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i-start_idx] == l) + fout.write('%d, %d\n' % (pred_val[i-start_idx], l)) + + if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP! 
+ img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l], + SHAPE_NAMES[pred_val[i-start_idx]]) + img_filename = os.path.join(DUMP_DIR, img_filename) + output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :])) + scipy.misc.imsave(img_filename, output_img) + error_cnt += 1 + + log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) + log_string('eval accuracy: %f' % (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + + class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float) + for i, name in enumerate(SHAPE_NAMES): + log_string('%10s:\t%0.3f' % (name, class_accuracies[i])) + + + +if __name__=='__main__': + with tf.Graph().as_default(): + evaluate(num_votes=1) + LOG_FOUT.close() diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..31529da2e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:OK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py new file mode 100644 index 000000000..18651c47f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/provider.py @@ -0,0 +1,165 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
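+#
+# provider.py collects the data utilities shared by train.py and evaluate.py:
+# reading the ModelNet40 HDF5 shards ('data'/'label' datasets), shuffling, and
+# the two training-time augmentations used below -- random rotation about the
+# up axis and per-point Gaussian jitter (sigma=0.01, clipped to +/-0.05).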
+# +import os +import sys +import numpy as np +import h5py + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +# Download dataset for point cloud classification +# 拼接data路径 +''' +DATA_DIR = os.path.join(BASE_DIR, 'data') +# 如果没有路径,则创建文件夹 +if not os.path.exists(DATA_DIR): + os.mkdir(DATA_DIR) +# 若不存在指定的文件夹,则从指定url下载压缩包,并解压缩 +# 实际上不好用,zipfile下载不下来。所以mv和rm就都报错了。 +if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')): + www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip' + zipfile = os.path.basename(www) + os.system('wget %s; unzip %s' % (www, zipfile)) + os.system('mv %s %s' % (zipfile[:-4], DATA_DIR)) + os.system('rm %s' % (zipfile)) +''' + +# 把数据随机打乱 +def shuffle_data(data, labels): + """ Shuffle data and labels. + Input: + data: B,N,... numpy array + label: B,... numpy array + Return: + shuffled data, label and shuffle indices + """ + # 取标签长度 + idx = np.arange(len(labels)) + # 打乱索引 + np.random.shuffle(idx) + # 返回打乱的数据,标签和索引 + return data[idx, ...], labels[idx], idx + + +# 旋转点云 +def rotate_point_cloud(batch_data): + # 随机旋转点云以扩大数据集 + # 旋转是基于向上方向的每个形状 + # 输入: + # BxNx3阵列,原始batch的点云 + # 返回: + # BxNx3阵列,旋转的点云batch + """ Randomly rotate the point clouds to augument the dataset + rotation is per shape based along up direction + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, 0, sinval], + [0, 1, 0], + [-sinval, 0, cosval]]) + shape_pc = batch_data[k, ...] + rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + return rotated_data + + +# 按角度旋转点云 +def rotate_point_cloud_by_angle(batch_data, rotation_angle): + """ Rotate the point cloud along up direction with certain angle. + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + # rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, 0, sinval], + [0, 1, 0], + [-sinval, 0, cosval]]) + shape_pc = batch_data[k, ...] + rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + return rotated_data + + +# 抖动点云 +def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): + """ Randomly jitter points. jittering is per point. 
+ Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, jittered batch of point clouds + """ + B, N, C = batch_data.shape + assert (clip > 0) + jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip) + jittered_data += batch_data + return jittered_data + + +# 获得复数个数据文件 +def getDataFiles(list_filename): + return [line.rstrip() for line in open(list_filename)] + + +# 加载h5文件 +def load_h5(h5_filename): + f = h5py.File(h5_filename) + data = f['data'][:] + label = f['label'][:] + return (data, label) + + +# 获得单个数据文件 +def loadDataFile(filename): + return load_h5(filename) + + +# 加载h5数据标签段 +def load_h5_data_label_seg(h5_filename): + f = h5py.File(h5_filename) + data = f['data'][:] + label = f['label'][:] + seg = f['pid'][:] + return (data, label, seg) + + +# 用seg加载数据文件 +def loadDataFile_with_seg(filename): + return load_h5_data_label_seg(filename) diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..4a6683530 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train.py @@ -0,0 +1,452 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
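+#
+# train.py is the NPU-adapted single-card training entry point: it builds the
+# TF1-style graph through tf.compat.v1 (npu.compat.enable_v1()), trains for
+# max_epoch epochs with rotation/jitter augmentation, evaluates after every
+# epoch, and checkpoints to --log_dir every 10 epochs. NPU behaviour
+# (precision mode, dump, profiling, mixlist, fusion switch, auto-tune) is
+# configured through the NpuOptimizer custom op set up in train().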
+# +# import npu_device +# npu_device.open().as_default() + + +import argparse +# import math +# import h5py +import numpy as np +import tensorflow as tf +import socket +import importlib +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, 'models')) +sys.path.append(os.path.join(BASE_DIR, 'utils')) +import provider +# import tf_util +import time +import datetime +import ast +from npu_device.compat.v1.npu_init import * +import npu_device as npu +npu.compat.enable_v1() + +starttime = datetime.datetime.now() + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') +parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') +parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]') +parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]') +parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]') +parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') +parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') +parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') +parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') +parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]') +parser.add_argument('--data_path', type=str, default='', help='data path') +parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') +parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') +parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') +parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') +parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') +parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') +parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') +parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') +parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') +parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') +parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') +parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') +parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False') +FLAGS = parser.parse_args() + + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = 
FLAGS.num_point +MAX_EPOCH = FLAGS.max_epoch +BASE_LEARNING_RATE = FLAGS.learning_rate +GPU_INDEX = FLAGS.gpu +MOMENTUM = FLAGS.momentum +OPTIMIZER = FLAGS.optimizer +DECAY_STEP = FLAGS.decay_step +DECAY_RATE = FLAGS.decay_rate + +MODEL = importlib.import_module(FLAGS.model) # import network module +MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py') +LOG_DIR = FLAGS.log_dir +if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) +os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def +os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure +LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +MAX_NUM_POINT = 2048 +NUM_CLASSES = 40 + +BN_INIT_DECAY = 0.5 +BN_DECAY_DECAY_RATE = 0.5 +BN_DECAY_DECAY_STEP = float(DECAY_STEP) +BN_DECAY_CLIP = 0.99 + +HOSTNAME = socket.gethostname() + +# ModelNet40 official train/test split +TRAIN_FILES = provider.getDataFiles( \ + os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/train_files.txt')) +TEST_FILES = provider.getDataFiles(\ + os.path.join(FLAGS.data_path, 'modelnet40_ply_hdf5_2048/test_files.txt')) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + + +# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。 +# tf.train.exponential_decay函数实现指数衰减学习率。 +def get_learning_rate(batch): + # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 + # 通过tf.train.exponential_decay函数实现指数衰减学习率。 + # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, + # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 + # tf.train.exponential_decay就是用来实现这个功能。 + # + # 步骤: + # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); + # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); + learning_rate = tf.compat.v1.train.exponential_decay( + BASE_LEARNING_RATE, # Base learning rate. + batch * BATCH_SIZE, # Current index into the dataset. + DECAY_STEP, # Decay step. + DECAY_RATE, # Decay rate. + staircase=True) + # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大) + learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! + return learning_rate + + +# 取得bn衰减 +# if the argument staircase is True, +# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function. +# 计算衰减的Batch Normalization 的 decay。 +def get_bn_decay(batch): + # 指数衰减法 + + # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 + # 通过tf.train.exponential_decay函数实现指数衰减学习率。 + # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, + # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 + # tf.train.exponential_decay就是用来实现这个功能。 + # + # 步骤: + # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); + # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); + bn_momentum = tf.compat.v1.train.exponential_decay( + BN_INIT_DECAY, + batch*BATCH_SIZE, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + # bn衰减0.99和1-衰减后的动量,取最小 + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + return bn_decay + + +# 初始运行的训练函数。 +# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练. 
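+# Worked example of the schedule implemented by get_learning_rate() above,
+# with the default hyper-parameters (BASE_LEARNING_RATE=0.001, BATCH_SIZE=32,
+# DECAY_STEP=200000, DECAY_RATE=0.7, staircase=True); note the test/ scripts
+# override the base rate to 0.0015:
+#   lr(batch) = max(0.001 * 0.7 ** ((batch * 32) // 200000), 0.00001)
+#   batch =     0  ->  lr = 0.001
+#   batch =  6250  ->  batch * BATCH_SIZE = 200000  ->  lr = 0.0007
+#   batch = 12500  ->  lr = 0.00049, and so on down to the 0.00001 floor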
+def train(): + # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图 + with tf.Graph().as_default(): + # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号, + # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。 + # with tf.device('/gpu:'+str(GPU_INDEX)): + with tf.device('/gpu:0'): + # 使用了pointne_cls.py的placeholder_inputs()方法。 + # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。 + # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型, + # 它只会分配必要的内存,用于传入外部数据。 + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + # 向指定好的对象中喂入数据:tf.placeholder() + # 取得占位符:是否在训练。 + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + print(is_training_pl) + + # Note the global_step=batch parameter to minimize. + # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains. + # 将 global_step = batch 参数最小化。 + # 这是在告诉优化器 在每次训练时 为你有用地增加'batch'参数。 + # 定义 batch = 0 + batch = tf.Variable(0) + # 取得bn衰减(自定义方法) + bn_decay = get_bn_decay(batch) + # 用来显示标量信息,一般在画loss,accuary时会用到这个函数。 + tf.compat.v1.summary.scalar('bn_decay', bn_decay) + + # Get model and loss + # 创建的数据处理网络为pred,调用 model\pointnet_cls 下的get_model()得到。由get_model()可知, + # pred的维度为B×N×40,40为分出的类别Channel数,对应40个分类标签。每个点的这40个值最大的一个的下标即为所预测的分类标签。 + # 首先使用共享参数的MLP对每个点进行特征提取,再使用MaxPooling在特征维进行池化操作, + # 使得网络对不同数量点的点云产生相同维度的特征向量,且输出对输入点的顺序产生不变性。 + # 在得到固定维度的特征向量之后,再使用一个MLP对其进行分类。 + pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) + # 调用pointnet_cls下的get_loss() + loss = MODEL.get_loss(pred, labels_pl, end_points) + tf.compat.v1.summary.scalar('loss', loss) + + # tf.argmax(pred, 2) 返回pred C 这个维度的最大值索引返回相同维度的bool值矩阵 + # tf.equal() 比较两个张量对应位置是否相等 + correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64)) + # 压缩求和,用于降维 + accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE) + tf.compat.v1.summary.scalar('accuracy', accuracy) + + # Get training operator + # 取得学习率(自定义方法),获得衰减后的学习率,以及选择优化器optimizer。 + learning_rate = get_learning_rate(batch) + tf.compat.v1.summary.scalar('learning_rate', learning_rate) + if OPTIMIZER == 'momentum': + optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) + elif OPTIMIZER == 'adam': + optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate) + # minimize的内部存在两个操作:(1)计算各个变量的梯度 (2)用梯度更新这些变量的值 + # (1)计算loss对指定val_list的梯度(导数),返回元组列表[(gradient,variable),…] + # (2)用计算得到的梯度来更新对应的变量(权重) + # 注意:在程序中global_step初始化为0,每次更新参数时,自动加1 + # 将minimize()分成两个步骤的原因:在某种情况下对梯度进行修正,防止梯度消失或者梯度爆炸 + train_op = optimizer.minimize(loss, global_step=batch) + + # Add ops to save and restore all the variables. 
+        saver = tf.compat.v1.train.Saver()
+
+        # Create a session
+        # 配置session 运行参数。
+        # 创建sess的时候对sess进行参数配置
+        config = tf.compat.v1.ConfigProto()
+        custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+        custom_op.name = 'NpuOptimizer'
+        custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode)
+        if FLAGS.data_dump_flag:
+            custom_op.parameter_map["enable_dump"].b = True
+            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path)
+            custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step)
+            custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
+        if FLAGS.over_dump:
+            custom_op.parameter_map["enable_dump_debug"].b = True
+            custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path)
+            custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
+        if FLAGS.profiling:
+            custom_op.parameter_map["profiling_mode"].b = True
+            profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
+                                "training_trace":"on", \
+                                "task_trace":"on", \
+                                "aicpu":"on", \
+                                "aic_metrics":"PipeUtilization",\
+                                "fp_point":"", \
+                                "bp_point":""}'
+            custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
+        if FLAGS.use_mixlist and FLAGS.precision_mode=='allow_mix_precision':
+            custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file)
+        if FLAGS.fusion_off_flag:
+            custom_op.parameter_map["fusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file)
+        if FLAGS.auto_tune:
+            custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
+        config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # 必须显式关闭
+        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF  # 必须显式关闭
+
+        # =True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。
+        config.gpu_options.allow_growth = True
+        # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行
+        config.allow_soft_placement = True
+        # 在终端打印出各项操作是在哪个设备上运行的
+        config.log_device_placement = False
+        # 创建 sess, 才能运行框架
+        sess = tf.compat.v1.Session(config=config)
+
+        # Add summary writers
+        #merged = tf.merge_all_summaries()
+        merged = tf.compat.v1.summary.merge_all()
+        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
+                                                       sess.graph)
+        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
+
+        # Init variables
+        # 初始化参数,开始训练
+        # train_one_epoch 函数用来训练一个epoch,eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的
+        # accuracy和loss。每10个epoch保存1次模型。
+        init = tf.compat.v1.global_variables_initializer()
+        # To fix the bug introduced in TF 0.12.1 as in
+        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
+        #sess.run(init)
+        # 运行sess初始化所有的全局变量
+        sess.run(init, {is_training_pl: True})
+
+        # ops 是一个字典,作为接口传入训练和评估 epoch 循环中。
+        # pred 是数据处理网络模块;loss 是 损失函数;train_op 是优化器;batch 是当前的批次
+        ops = {'pointclouds_pl': pointclouds_pl,
+               'labels_pl': labels_pl,
+               'is_training_pl': is_training_pl,
+               'pred': pred,
+               'loss': loss,
+               'train_op': train_op,
+               'merged': merged,
+               'step': batch}
+
+        for epoch in range(MAX_EPOCH):
+            # log(自定义方法)
+            log_string('**** EPOCH %03d ****' % (epoch))
+            # 在同一个位置刷新输出
+            sys.stdout.flush()
+
+            # 训练一个批次(自定义方法)
+            # train_one_epoch 函数用来训练一个epoch
+            train_one_epoch(sess, ops, train_writer)
+            # 评估一个批次(自定义方法)
+            # eval_one_epoch函数用来每运行一个epoch后evaluate在测试集的accuracy和loss
+            eval_one_epoch(sess, ops, test_writer)
+
+ # Save the variables to disk.每10个epoch保存1次模型 + if epoch % 10 == 0: + save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) + # log(自定义方法) + log_string("Model saved in file: %s" % save_path) + + +# provider.shuffle_data 函数随机打乱数据,返回打乱后的数据。 +# num_batches = file_size/BATCH_SIZE,计算在指定BATCH_SIZE下,训练1个epoch 需要几个mini-batch训练。 +def train_one_epoch(sess, ops, train_writer): + """ ops: dict mapping from string to tf ops """ + is_training = True + + # Shuffle train files + # 随机打乱训练数据 + train_file_idxs = np.arange(0, len(TRAIN_FILES)) + np.random.shuffle(train_file_idxs) + + L = [] + for fn in range(len(TRAIN_FILES)): + log_string('----' + str(fn) + '-----') + current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TRAIN_FILES[train_file_idxs[fn]])) + current_data = current_data[:,0:NUM_POINT,:] + current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label)) + current_label = np.squeeze(current_label) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + endtime = datetime.datetime.now() + if fn == 0: + TOTLE_TIME = (endtime - starttime).seconds + L.append(TOTLE_TIME) + + # 在一个epoch 中逐个mini-batch训练直至遍历完一遍训练集。计算总分类正确数total_correct和已遍历样本数 + + # total_senn,总损失loss_sum. + for batch_idx in range(num_batches): + start_time = time.time() + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + + # Augment batched point clouds by rotation and jittering + # 调用provider中rotate_point_cloud + rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :]) + jittered_data = provider.jitter_point_cloud(rotated_data) + feed_dict = {ops['pointclouds_pl']: jittered_data, + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training,} + summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict) + cost_time = time.time() - start_time + FPS = BATCH_SIZE / cost_time + # 训练,使用 tf 的 session 运行设计的框架,ops['pred'] 为整个网络,feed_dict 为网络提供的数据 + train_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + total_correct += correct + total_seen += BATCH_SIZE + loss_sum += loss_val + + # 记录平均loss,以及平均accuracy。 + log_string('TOTLE_TIME : %.2f' % (float(L[0]))) + log_string('FPS : %.2f' % (float(FPS))) + log_string('mean loss: %f' % (loss_sum / float(num_batches))) + log_string('accuracy: %f' % (total_correct / float(total_seen))) + + +def eval_one_epoch(sess, ops, test_writer): + """ ops: dict mapping from string to tf ops """ + is_training = False + total_correct = 0 + total_seen = 0 + loss_sum = 0 + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + + for fn in range(len(TEST_FILES)): + log_string('----' + str(fn) + '-----') + current_data, current_label = provider.loadDataFile(os.path.join(FLAGS.data_path, TEST_FILES[fn])) + current_data = current_data[:,0:NUM_POINT,:] + current_label = np.squeeze(current_label) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + + for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + + feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training} + summary, step, 
loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['loss'], ops['pred']], feed_dict=feed_dict) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + total_correct += correct + total_seen += BATCH_SIZE + loss_sum += (loss_val*BATCH_SIZE) + for i in range(start_idx, end_idx): + l = current_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i-start_idx] == l) + + log_string('eval mean loss: %f' % (loss_sum / float(total_seen))) + log_string('eval accuracy: %f'% (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + + + +if __name__ == "__main__": + train() + LOG_FOUT.close() diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py new file mode 100644 index 000000000..34c60ca17 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/train_real.py @@ -0,0 +1,381 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
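+#
+# train_real.py is a GPU/CPU variant of train.py kept for a custom dataset
+# (file lists under data_real/, 4096 points per cloud, batch size 5); the
+# graph construction and training loop mirror train.py but without the NPU
+# session configuration.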
+# +import argparse +import math +import h5py +import numpy as np +import tensorflow as tf +import socket +import importlib +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, 'models')) +sys.path.append(os.path.join(BASE_DIR, 'utils')) +import provider +import tf_util + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]') +parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') +parser.add_argument('--num_point', type=int, default=4096, help='Point Number [256/512/1024/2048] [default: 1024]') +parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 250]') +parser.add_argument('--batch_size', type=int, default=5, help='Batch Size during training [default: 32]') +parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') +parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') +parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') +parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') +parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]') +FLAGS = parser.parse_args() + + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MAX_EPOCH = FLAGS.max_epoch +BASE_LEARNING_RATE = FLAGS.learning_rate +GPU_INDEX = FLAGS.gpu +MOMENTUM = FLAGS.momentum +OPTIMIZER = FLAGS.optimizer +DECAY_STEP = FLAGS.decay_step +DECAY_RATE = FLAGS.decay_rate + +MODEL = importlib.import_module(FLAGS.model) # import network module +MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py') +LOG_DIR = FLAGS.log_dir +if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) +os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def +os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure +LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +MAX_NUM_POINT = 4096 +NUM_CLASSES = 40 + +BN_INIT_DECAY = 0.5 +BN_DECAY_DECAY_RATE = 0.5 +BN_DECAY_DECAY_STEP = float(DECAY_STEP) +BN_DECAY_CLIP = 0.99 + +HOSTNAME = socket.gethostname() + +# ModelNet40 official train/test split +TRAIN_FILES = provider.getDataFiles( \ + os.path.join(BASE_DIR, 'data_real/train_files.txt')) +TEST_FILES = provider.getDataFiles(\ + os.path.join(BASE_DIR, 'data_real/test_files.txt')) +print(TRAIN_FILES) +print(TEST_FILES) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + + +# 计算指数衰减的学习率。训练时学习率最好随着训练衰减。 +# tf.train.exponential_decay函数实现指数衰减学习率。 +def get_learning_rate(batch): + # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 + # 通过tf.train.exponential_decay函数实现指数衰减学习率。 + # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, + # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 + # tf.train.exponential_decay就是用来实现这个功能。 + # + # 步骤: + # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); + # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); + learning_rate = tf.compat.v1.train.exponential_decay( + BASE_LEARNING_RATE, # Base learning rate. + batch * BATCH_SIZE, # Current index into the dataset. + DECAY_STEP, # Decay step. + DECAY_RATE, # Decay rate. 
+ staircase=True) + # 训练时学习率最好随着训练衰减,learning_rate最大取0.00001 (衰减后的学习率和0.00001取最大) + learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! + return learning_rate + + +# 取得bn衰减 +# if the argument staircase is True, +# then global_step /decay_steps is an integer division and the decayed learning rate follows a staircase function. +# 计算衰减的Batch Normalization 的 decay。 +def get_bn_decay(batch): + # 指数衰减法 + + # 在Tensorflow中,为解决设定学习率(learning rate)问题,提供了指数衰减法来解决。 + # 通过tf.train.exponential_decay函数实现指数衰减学习率。 + # 学习率较大容易搜索震荡(在最优值附近徘徊),学习率较小则收敛速度较慢, + # 那么可以通过初始定义一个较大的学习率,通过设置decay_rate来缩小学习率,减少迭代次数。 + # tf.train.exponential_decay就是用来实现这个功能。 + # + # 步骤: + # 1.首先使用较大学习率(目的:为快速得到一个比较优的解); + # 2.然后通过迭代逐步减小学习率(目的:为使模型在训练后期更加稳定); + bn_momentum = tf.compat.v1.train.exponential_decay( + BN_INIT_DECAY, + batch*BATCH_SIZE, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + # bn衰减0.99和1-衰减后的动量,取最小 + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + return bn_decay + + +# 初始运行的训练函数。 +# 这一段主要是通过placeholder进行赋值, 模型的参数准备和构建整个训练网络(数据处理+loss+优化器),模型记录工作,最后进行训练. +def train(): + # 将这个类实例,也就是新生成的图作为整个 tensorflow 运行环境的默认图 + with tf.Graph().as_default(): + # 如果需要切换成CPU运算,可以调用tf.device(device_name)函数,其中device_name格式如 /cpu:0 其中的0表示设备号, + # TF不区分CPU的设备号,设置为0即可。GPU区分设备号 /gpu:0 和 /gpu:1 表示两张不同的显卡。 + # with tf.device('/gpu:'+str(GPU_INDEX)): + with tf.device('/cpu:0'): + # 使用了pointne_cls.py的placeholder_inputs()方法。 + # 取得占位符,点云,标签。 输入是 一批数据的数量,点的数量。 + # placeholder()函数是在神经网络构建graph的时候在模型中的占位,此时并没有把要输入的数据传入模型, + # 它只会分配必要的内存,用于传入外部数据。 + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + # 向指定好的对象中喂入数据:tf.placeholder() + # 取得占位符:是否在训练。 + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + print(is_training_pl) + + # Note the global_step=batch parameter to minimize. + # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains. 
+
+
+# The top-level training function.
+# This section feeds values in through placeholders, prepares the model
+# parameters, builds the full training network (data processing + loss +
+# optimizer), sets up model logging, and finally runs the training.
+def train():
+    # Make this newly created graph the default graph of the whole
+    # TensorFlow runtime.
+    with tf.Graph().as_default():
+        # To switch devices, call tf.device(device_name), where device_name
+        # looks like /cpu:0 (TF does not distinguish CPU device indices, so 0
+        # is enough), while /gpu:0 and /gpu:1 denote two different GPUs.
+        # with tf.device('/gpu:'+str(GPU_INDEX)):
+        with tf.device('/cpu:0'):
+            # Use placeholder_inputs() from pointnet_cls.py to obtain the
+            # point-cloud and label placeholders; the inputs are the batch
+            # size and the number of points.
+            # placeholder() reserves a spot in the model while the graph is
+            # being built; no input data is passed in at this point. It only
+            # allocates the memory needed to feed external data in later.
+            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
+            # Data is fed into these objects later via tf.placeholder().
+            # Placeholder flag: whether we are training.
+            is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=())
+            print(is_training_pl)
+
+            # Note the global_step=batch parameter to minimize.
+            # That tells the optimizer to helpfully increment the 'batch'
+            # parameter for you every time it trains.
+            # Define batch = 0.
+            batch = tf.Variable(0)
+            # Get the bn decay (helper defined above).
+            bn_decay = get_bn_decay(batch)
+            # Record a scalar summary, typically used when plotting loss or accuracy.
+            tf.compat.v1.summary.scalar('bn_decay', bn_decay)
+
+            # Get model and loss
+            # The prediction network pred is built by get_model() under
+            # models/pointnet_cls. pred has shape B x 40, where 40 is the
+            # number of class channels matching the 40 classification labels;
+            # the index of the largest of these 40 scores is the predicted class.
+            # A shared-parameter MLP first extracts per-point features, then
+            # max pooling aggregates over the feature dimension, so the network
+            # produces a fixed-size feature vector for clouds with different
+            # point counts and its output is invariant to the point order.
+            # A final MLP classifies that fixed-size feature vector.
+            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
+            # Call get_loss() from pointnet_cls.
+            loss = MODEL.get_loss(pred, labels_pl, end_points)
+            tf.compat.v1.summary.scalar('loss', loss)
+
+            # tf.argmax(pred, axis=1) returns the index of the maximum along
+            # the class dimension; tf.equal() compares two tensors elementwise
+            # and returns a bool tensor of the same shape.
+            correct = tf.equal(tf.argmax(input=pred, axis=1), tf.cast(labels_pl, dtype=tf.int64))
+            # Reduce-sum over the batch to count the correct predictions.
+            accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
+            tf.compat.v1.summary.scalar('accuracy', accuracy)
+
+            # Get training operator
+            # Get the decayed learning rate (helper defined above) and choose the optimizer.
+            learning_rate = get_learning_rate(batch)
+            tf.compat.v1.summary.scalar('learning_rate', learning_rate)
+            if OPTIMIZER == 'momentum':
+                optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
+            elif OPTIMIZER == 'adam':
+                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
+            # minimize() performs two operations: (1) compute the gradients of
+            # the loss w.r.t. the given variables, returned as a list of
+            # (gradient, variable) pairs; (2) apply those gradients to update
+            # the corresponding variables (weights).
+            # Note: global_step starts at 0 and is incremented by 1 on every
+            # parameter update.
+            # The reason minimize() can be split into those two steps is to
+            # allow modifying the gradients in between, e.g. to counter
+            # vanishing or exploding gradients.
+            train_op = optimizer.minimize(loss, global_step=batch)
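+
+            # A minimal sketch of that expanded two-step form, with gradient
+            # clipping inserted between the steps; illustrative only, the
+            # script itself calls minimize() directly:
+            #
+            #     grads_and_vars = optimizer.compute_gradients(loss)
+            #     clipped = [(tf.clip_by_norm(g, 5.0), v)
+            #                for g, v in grads_and_vars if g is not None]
+            #     train_op = optimizer.apply_gradients(clipped, global_step=batch)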
+
+        # Add ops to save and restore all the variables.
+        saver = tf.compat.v1.train.Saver()
+
+        # Create a session
+        # Configure the session's runtime parameters when creating it.
+        config = tf.compat.v1.ConfigProto()
+        # True lets TensorFlow grow GPU memory on demand instead of grabbing too much up front.
+        config.gpu_options.allow_growth = True
+        # If the requested device does not exist, fall back to one that does
+        # (e.g. automatically drop to the CPU when no GPU is present).
+        config.allow_soft_placement = True
+        # Whether to print which device each operation runs on.
+        config.log_device_placement = False
+        # Create the session; nothing runs without one.
+        sess = tf.compat.v1.Session(config=config)
+
+        # Add summary writers
+        #merged = tf.merge_all_summaries()
+        merged = tf.compat.v1.summary.merge_all()
+        train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
+                                                       sess.graph)
+        test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
+
+        # Init variables
+        # Initialize the parameters, then start training.
+        # train_one_epoch trains one epoch; eval_one_epoch evaluates accuracy
+        # and loss on the test set after each epoch. The model is saved every
+        # 10 epochs.
+        init = tf.compat.v1.global_variables_initializer()
+        # To fix the bug introduced in TF 0.12.1 as in
+        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
+        #sess.run(init)
+        # Run the session to initialize all global variables.
+        sess.run(init, {is_training_pl: True})
+
+        # ops is a dict used as the interface passed into the training and
+        # evaluation epoch loops.
+        # pred is the prediction network; loss the loss function; train_op the
+        # optimizer step; batch the current global step.
+        ops = {'pointclouds_pl': pointclouds_pl,
+               'labels_pl': labels_pl,
+               'is_training_pl': is_training_pl,
+               'pred': pred,
+               'loss': loss,
+               'train_op': train_op,
+               'merged': merged,
+               'step': batch}
+
+        for epoch in range(MAX_EPOCH):
+            # log (helper defined above)
+            log_string('**** EPOCH %03d ****' % (epoch))
+            # Flush so the output refreshes in place.
+            sys.stdout.flush()
+
+            # Train one epoch (helper defined below).
+            train_one_epoch(sess, ops, train_writer)
+            # Evaluate one epoch (helper defined below): accuracy and loss on
+            # the test set after each training epoch.
+            eval_one_epoch(sess, ops, test_writer)
+
+            # Save the variables to disk every 10 epochs.
+            if epoch % 10 == 0:
+                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
+                # log (helper defined above)
+                log_string("Model saved in file: %s" % save_path)
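+
+
+# The per-batch augmentation used below comes from provider.py. A minimal
+# NumPy sketch of the jitter step, assuming provider.jitter_point_cloud adds
+# clipped Gaussian noise to every point (sketch only; the script uses the
+# provider implementation):
+def sketch_jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
+    noise = np.clip(sigma * np.random.randn(*batch_data.shape), -clip, clip)
+    return batch_data + noise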
+
+
+# provider.shuffle_data randomly shuffles the data and returns the shuffled arrays.
+# num_batches = file_size / BATCH_SIZE: how many mini-batches one epoch takes
+# at the given BATCH_SIZE.
+def train_one_epoch(sess, ops, train_writer):
+    """ ops: dict mapping from string to tf ops """
+    is_training = True
+
+    # Shuffle train files
+    train_file_idxs = np.arange(0, len(TRAIN_FILES))
+    np.random.shuffle(train_file_idxs)
+
+    for fn in range(len(TRAIN_FILES)):
+        log_string('----' + str(fn) + '-----')
+        current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
+        current_data = current_data[:,0:NUM_POINT,:]
+        current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
+        current_label = np.squeeze(current_label)
+
+        file_size = current_data.shape[0]
+        num_batches = file_size // BATCH_SIZE
+
+        total_correct = 0
+        total_seen = 0
+        loss_sum = 0
+
+        # Train mini-batch by mini-batch until the whole training set has been
+        # seen once in this epoch, accumulating the number of correct
+        # classifications total_correct, the number of samples seen
+        # total_seen, and the total loss loss_sum.
+        for batch_idx in range(num_batches):
+            start_idx = batch_idx * BATCH_SIZE
+            end_idx = (batch_idx+1) * BATCH_SIZE
+
+            # Augment batched point clouds by rotation and jittering
+            # (rotate_point_cloud and jitter_point_cloud from provider).
+            rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
+            jittered_data = provider.jitter_point_cloud(rotated_data)
+            feed_dict = {ops['pointclouds_pl']: jittered_data,
+                         ops['labels_pl']: current_label[start_idx:end_idx],
+                         ops['is_training_pl']: is_training,}
+            # Train: run the graph in the tf session; ops['pred'] is the whole
+            # network and feed_dict supplies its input data.
+            summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
+                ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
+            train_writer.add_summary(summary, step)
+            pred_val = np.argmax(pred_val, 1)
+            correct = np.sum(pred_val == current_label[start_idx:end_idx])
+            total_correct += correct
+            total_seen += BATCH_SIZE
+            loss_sum += loss_val
+
+        # Log the mean loss and the mean accuracy.
+        log_string('mean loss: %f' % (loss_sum / float(num_batches)))
+        log_string('accuracy: %f' % (total_correct / float(total_seen)))
+
+
+def eval_one_epoch(sess, ops, test_writer):
+    """ ops: dict mapping from string to tf ops """
+    is_training = False
+    total_correct = 0
+    total_seen = 0
+    loss_sum = 0
+    total_seen_class = [0 for _ in range(NUM_CLASSES)]
+    total_correct_class = [0 for _ in range(NUM_CLASSES)]
+
+    for fn in range(len(TEST_FILES)):
+        log_string('----' + str(fn) + '-----')
+        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
+        current_data = current_data[:,0:NUM_POINT,:]
+        current_label = np.squeeze(current_label)
+
+        file_size = current_data.shape[0]
+        num_batches = file_size // BATCH_SIZE
+
+        for batch_idx in range(num_batches):
+            start_idx = batch_idx * BATCH_SIZE
+            end_idx = (batch_idx+1) * BATCH_SIZE
+
+            feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
+                         ops['labels_pl']: current_label[start_idx:end_idx],
+                         ops['is_training_pl']: is_training}
+            summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
+                ops['loss'], ops['pred']], feed_dict=feed_dict)
+            pred_val = np.argmax(pred_val, 1)
+            correct = np.sum(pred_val == current_label[start_idx:end_idx])
+            total_correct += correct
+            total_seen += BATCH_SIZE
+            loss_sum += (loss_val*BATCH_SIZE)
+            for i in range(start_idx, end_idx):
+                l = current_label[i]
+                total_seen_class[l] += 1
+                total_correct_class[l] += (pred_val[i-start_idx] == l)
+
+    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
+    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
+    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
+
+
+
+if __name__ == "__main__":
+    train()
+    LOG_FOUT.close()
-- 
Gitee

From f13bfd0462b0923220218c5308b58e2813e2c170 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:31:34 +0000
Subject: =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
 =?UTF-8?q?nsorFlow2/built-in/keras=5Fsample/PointNet=5FID2913=5Ffor=5FTen?=
 =?UTF-8?q?sorFlow2.X/.keep?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep

diff --git 
a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/.keep deleted file mode 100644 index e69de29bb..000000000 -- Gitee From eb5a768c3aad0666c79b7d07ae3242f5a9213f70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:31:46 +0000 Subject: [PATCH 22/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../utils/data_prep_util.py | 174 ++++ .../utils/eulerangles.py | 447 +++++++++ .../utils/pc_util.py | 227 +++++ .../utils/plyfile.py | 932 ++++++++++++++++++ .../utils/tf_util.py | 614 ++++++++++++ 5 files changed, 2394 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py new file mode 100644 index 000000000..5ec338160 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/data_prep_util.py @@ -0,0 +1,174 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+from plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)
+import numpy as np
+import h5py
+
+SAMPLING_BIN = os.path.join(BASE_DIR, 'third_party/mesh_sampling/build/pcsample')
+
+SAMPLING_POINT_NUM = 2048
+SAMPLING_LEAF_SIZE = 0.005
+
+MODELNET40_PATH = '../datasets/modelnet40'
+def export_ply(pc, filename):
+    vertex = np.zeros(pc.shape[0], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
+    for i in range(pc.shape[0]):
+        vertex[i] = (pc[i][0], pc[i][1], pc[i][2])
+    ply_out = PlyData([PlyElement.describe(vertex, 'vertex', comments=['vertices'])])
+    ply_out.write(filename)
+
+# Sample points on the obj shape
+def get_sampling_command(obj_filename, ply_filename):
+    cmd = SAMPLING_BIN + ' ' + obj_filename
+    cmd += ' ' + ply_filename
+    cmd += ' -n_samples %d ' % SAMPLING_POINT_NUM
+    cmd += ' -leaf_size %f ' % SAMPLING_LEAF_SIZE
+    return cmd
+
+# --------------------------------------------------------------
+# Following are the helper functions to load MODELNET40 shapes
+# --------------------------------------------------------------
+
+# Read in the list of categories in MODELNET40
+def get_category_names():
+    shape_names_file = os.path.join(MODELNET40_PATH, 'shape_names.txt')
+    shape_names = [line.rstrip() for line in open(shape_names_file)]
+    return shape_names
+
+# Return all the filepaths for the shapes in MODELNET40
+def get_obj_filenames():
+    obj_filelist_file = os.path.join(MODELNET40_PATH, 'filelist.txt')
+    obj_filenames = [os.path.join(MODELNET40_PATH, line.rstrip()) for line in open(obj_filelist_file)]
+    print('Got %d obj files in modelnet40.' % len(obj_filenames))
+    return obj_filenames
+
+# Helper function to create the parent folder and all subdir folders if they do not exist
+def batch_mkdir(output_folder, subdir_list):
+    if not os.path.exists(output_folder):
+        os.mkdir(output_folder)
+    for subdir in subdir_list:
+        if not os.path.exists(os.path.join(output_folder, subdir)):
+            os.mkdir(os.path.join(output_folder, subdir))
+
+# ----------------------------------------------------------------
+# Following are the helper functions to save/load HDF5 files
+# ----------------------------------------------------------------
+
+# Write numpy array data and label to h5_filename
+def save_h5_data_label_normal(h5_filename, data, label, normal,
+        data_dtype='float32', label_dtype='uint8', normal_dtype='float32'):
+    h5_fout = h5py.File(h5_filename, 'w')
+    h5_fout.create_dataset(
+            'data', data=data,
+            compression='gzip', compression_opts=4,
+            dtype=data_dtype)
+    h5_fout.create_dataset(
+            'normal', data=normal,
+            compression='gzip', compression_opts=4,
+            dtype=normal_dtype)
+    h5_fout.create_dataset(
+            'label', data=label,
+            compression='gzip', compression_opts=1,
+            dtype=label_dtype)
+    h5_fout.close()
+
+
+# Write numpy array data and label to h5_filename
+def save_h5(h5_filename, data, label, data_dtype='uint8', label_dtype='uint8'):
+    h5_fout = h5py.File(h5_filename, 'w')
+    h5_fout.create_dataset(
+            'data', data=data,
+            compression='gzip', compression_opts=4,
+            dtype=data_dtype)
+    h5_fout.create_dataset(
+            'label', data=label,
+            compression='gzip', compression_opts=1,
+            dtype=label_dtype)
+    h5_fout.close()
+
+# Read numpy array data, label and normal from h5_filename
+def load_h5_data_label_normal(h5_filename):
+    f = h5py.File(h5_filename, 'r')
+    data = f['data'][:]
+    label = f['label'][:]
+    normal = f['normal'][:]
+    return (data, label, normal)
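+
+# A small usage sketch for the HDF5 helpers above and below (hypothetical
+# path and shapes, chosen for illustration; this function is not called
+# anywhere in the module):
+def sketch_h5_round_trip(path='/tmp/sample.h5'):
+    data = np.zeros((10, 2048, 3), dtype=np.float32)  # 10 clouds of 2048 points
+    label = np.zeros((10,), dtype=np.uint8)
+    save_h5(path, data, label, data_dtype='float32', label_dtype='uint8')
+    return load_h5(path)  # -> (data, label) read back from disk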
+
+# Read numpy array data, label and segmentation ids from h5_filename
+def load_h5_data_label_seg(h5_filename):
+    f = h5py.File(h5_filename, 'r')
+    data = f['data'][:]
+    label = f['label'][:]
+    seg = f['pid'][:]
+    return (data, label, seg)
+
+# Read numpy array data and label from h5_filename
+def load_h5(h5_filename):
+    f = h5py.File(h5_filename, 'r')
+    data = f['data'][:]
+    label = f['label'][:]
+    return (data, label)
+
+# ----------------------------------------------------------------
+# Following are the helper functions to save/load PLY files
+# ----------------------------------------------------------------
+
+# Load PLY file
+def load_ply_data(filename, point_num):
+    plydata = PlyData.read(filename)
+    pc = plydata['vertex'].data[:point_num]
+    pc_array = np.array([[x, y, z] for x,y,z in pc])
+    return pc_array
+
+# Load PLY file
+def load_ply_normal(filename, point_num):
+    plydata = PlyData.read(filename)
+    pc = plydata['normal'].data[:point_num]
+    pc_array = np.array([[x, y, z] for x,y,z in pc])
+    return pc_array
+
+# Make up rows for Nxk array
+# Input Pad is 'edge' or 'constant'
+def pad_arr_rows(arr, row, pad='edge'):
+    assert(len(arr.shape) == 2)
+    assert(arr.shape[0] <= row)
+    assert(pad == 'edge' or pad == 'constant')
+    if arr.shape[0] == row:
+        return arr
+    if pad == 'edge':
+        return np.lib.pad(arr, ((0, row-arr.shape[0]), (0, 0)), 'edge')
+    if pad == 'constant':
+        return np.lib.pad(arr, ((0, row-arr.shape[0]), (0, 0)), 'constant', constant_values=(0, 0))
+
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py
new file mode 100644
index 000000000..9157409ae
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/eulerangles.py
@@ -0,0 +1,447 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+# See COPYING file distributed along with the NiBabel package for the
+# copyright and license terms. 
+# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +''' Module implementing Euler angle rotations and their conversions + +See: + +* http://en.wikipedia.org/wiki/Rotation_matrix +* http://en.wikipedia.org/wiki/Euler_angles +* http://mathworld.wolfram.com/EulerAngles.html + +See also: *Representing Attitude with Euler Angles and Quaternions: A +Reference* (2006) by James Diebel. A cached PDF link last found here: + +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134 + +Euler's rotation theorem tells us that any rotation in 3D can be +described by 3 angles. Let's call the 3 angles the *Euler angle vector* +and call the angles in the vector :math:`alpha`, :math:`beta` and +:math:`gamma`. The vector is [ :math:`alpha`, +:math:`beta`. :math:`gamma` ] and, in this description, the order of the +parameters specifies the order in which the rotations occur (so the +rotation corresponding to :math:`alpha` is applied first). + +In order to specify the meaning of an *Euler angle vector* we need to +specify the axes around which each of the rotations corresponding to +:math:`alpha`, :math:`beta` and :math:`gamma` will occur. + +There are therefore three axes for the rotations :math:`alpha`, +:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`, +:math:`k`. + +Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3 +rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3 +matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the +whole rotation expressed by the Euler angle vector [ :math:`alpha`, +:math:`beta`. :math:`gamma` ], `R` is given by:: + + R = np.dot(G, np.dot(B, A)) + +See http://mathworld.wolfram.com/EulerAngles.html + +The order :math:`G B A` expresses the fact that the rotations are +performed in the order of the vector (:math:`alpha` around axis `i` = +`A` first). + +To convert a given Euler angle vector to a meaningful rotation, and a +rotation matrix, we need to define: + +* the axes `i`, `j`, `k` +* whether a rotation matrix should be applied on the left of a vector to + be transformed (vectors are column vectors) or on the right (vectors + are row vectors). +* whether the rotations move the axes as they are applied (intrinsic + rotations) - compared the situation where the axes stay fixed and the + vectors move within the axis frame (extrinsic) +* the handedness of the coordinate system + +See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities + +We are using the following conventions: + +* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus + an Euler angle vector [ :math:`alpha`, :math:`beta`. :math:`gamma` ] + in our convention implies a :math:`alpha` radian rotation around the + `z` axis, followed by a :math:`beta` rotation around the `y` axis, + followed by a :math:`gamma` rotation around the `x` axis. +* the rotation matrix applies on the left, to column vectors on the + right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix + with N column vectors, the transformed vector set `vdash` is given by + ``vdash = np.dot(R, v)``. +* extrinsic rotations - the axes are fixed, and do not move with the + rotations. +* a right-handed coordinate system + +The convention of rotation around ``z``, followed by rotation around +``y``, followed by rotation around ``x``, is known (confusingly) as +"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles. 
+''' + +import math + +import sys +if sys.version_info >= (3,0): + from functools import reduce + +import numpy as np + + +_FLOAT_EPS_4 = np.finfo(float).eps * 4.0 + + +def euler2mat(z=0, y=0, x=0): + ''' Return matrix for rotations around z, y and x axes + + Uses the z, then y, then x convention above + + Parameters + ---------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Returns + ------- + M : array shape (3,3) + Rotation matrix giving same rotation as for given angles + + Examples + -------- + >>> zrot = 1.3 # radians + >>> yrot = -0.1 + >>> xrot = 0.2 + >>> M = euler2mat(zrot, yrot, xrot) + >>> M.shape == (3, 3) + True + + The output rotation matrix is equal to the composition of the + individual rotations + + >>> M1 = euler2mat(zrot) + >>> M2 = euler2mat(0, yrot) + >>> M3 = euler2mat(0, 0, xrot) + >>> composed_M = np.dot(M3, np.dot(M2, M1)) + >>> np.allclose(M, composed_M) + True + + You can specify rotations by named arguments + + >>> np.all(M3 == euler2mat(x=xrot)) + True + + When applying M to a vector, the vector should column vector to the + right of M. If the right hand side is a 2D array rather than a + vector, then each column of the 2D array represents a vector. + + >>> vec = np.array([1, 0, 0]).reshape((3,1)) + >>> v2 = np.dot(M, vec) + >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array + >>> vecs2 = np.dot(M, vecs) + + Rotations are counter-clockwise. + + >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3)) + >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]]) + True + >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3)) + >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]]) + True + >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3)) + >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]]) + True + + Notes + ----- + The direction of rotation is given by the right-hand rule (orient + the thumb of the right hand along the axis around which the rotation + occurs, with the end of the thumb at the positive end of the axis; + curl your fingers; the direction your fingers curl is the direction + of rotation). Therefore, the rotations are counterclockwise if + looking along the axis of rotation from positive to negative. + ''' + Ms = [] + if z: + cosz = math.cos(z) + sinz = math.sin(z) + Ms.append(np.array( + [[cosz, -sinz, 0], + [sinz, cosz, 0], + [0, 0, 1]])) + if y: + cosy = math.cos(y) + siny = math.sin(y) + Ms.append(np.array( + [[cosy, 0, siny], + [0, 1, 0], + [-siny, 0, cosy]])) + if x: + cosx = math.cos(x) + sinx = math.sin(x) + Ms.append(np.array( + [[1, 0, 0], + [0, cosx, -sinx], + [0, sinx, cosx]])) + if Ms: + return reduce(np.dot, Ms[::-1]) + return np.eye(3) + + +def mat2euler(M, cy_thresh=None): + ''' Discover Euler angle vector from 3x3 matrix + + Uses the conventions above. + + Parameters + ---------- + M : array-like, shape (3,3) + cy_thresh : None or scalar, optional + threshold below which to give up on straightforward arctan for + estimating x rotation. If None (default), estimate from + precision of input. 
+ + Returns + ------- + z : scalar + y : scalar + x : scalar + Rotations in radians around z, y, x axes, respectively + + Notes + ----- + If there was no numerical error, the routine could be derived using + Sympy expression for z then y then x rotation matrix, which is:: + + [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)], + [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)], + [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)] + + with the obvious derivations for z, y, and x + + z = atan2(-r12, r11) + y = asin(r13) + x = atan2(-r23, r33) + + Problems arise when cos(y) is close to zero, because both of:: + + z = atan2(cos(y)*sin(z), cos(y)*cos(z)) + x = atan2(cos(y)*sin(x), cos(x)*cos(y)) + + will be close to atan2(0, 0), and highly unstable. + + The ``cy`` fix for numerical instability below is from: *Graphics + Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN: + 0123361559. Specifically it comes from EulerAngles.c by Ken + Shoemake, and deals with the case where cos(y) is close to zero: + + See: http://www.graphicsgems.org/ + + The code appears to be licensed (from the website) as "can be used + without restrictions". + ''' + M = np.asarray(M) + if cy_thresh is None: + try: + cy_thresh = np.finfo(M.dtype).eps * 4 + except ValueError: + cy_thresh = _FLOAT_EPS_4 + r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat + # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2) + cy = math.sqrt(r33*r33 + r23*r23) + if cy > cy_thresh: # cos(y) not close to zero, standard form + z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z)) + y = math.atan2(r13, cy) # atan2(sin(y), cy) + x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y)) + else: # cos(y) (close to) zero, so x -> 0.0 (see above) + # so r21 -> sin(z), r22 -> cos(z) and + z = math.atan2(r21, r22) + y = math.atan2(r13, cy) # atan2(sin(y), cy) + x = 0.0 + return z, y, x + + +def euler2quat(z=0, y=0, x=0): + ''' Return quaternion corresponding to these Euler angles + + Uses the z, then y, then x convention above + + Parameters + ---------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Returns + ------- + quat : array shape (4,) + Quaternion in w, x, y z (real, then vector) format + + Notes + ----- + We can derive this formula in Sympy using: + + 1. Formula giving quaternion corresponding to rotation of theta radians + about arbitrary axis: + http://mathworld.wolfram.com/EulerParameters.html + 2. Generated formulae from 1.) for quaternions corresponding to + theta radians rotations about ``x, y, z`` axes + 3. Apply quaternion multiplication formula - + http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to + formulae from 2.) to give formula for combined rotations. 
+ ''' + z = z/2.0 + y = y/2.0 + x = x/2.0 + cz = math.cos(z) + sz = math.sin(z) + cy = math.cos(y) + sy = math.sin(y) + cx = math.cos(x) + sx = math.sin(x) + return np.array([ + cx*cy*cz - sx*sy*sz, + cx*sy*sz + cy*cz*sx, + cx*cz*sy - sx*cy*sz, + cx*cy*sz + sx*cz*sy]) + + +def quat2euler(q): + ''' Return Euler angles corresponding to quaternion `q` + + Parameters + ---------- + q : 4 element sequence + w, x, y, z of quaternion + + Returns + ------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Notes + ----- + It's possible to reduce the amount of calculation a little, by + combining parts of the ``quat2mat`` and ``mat2euler`` functions, but + the reduction in computation is small, and the code repetition is + large. + ''' + # delayed import to avoid cyclic dependencies + import nibabel.quaternions as nq + return mat2euler(nq.quat2mat(q)) + + +def euler2angle_axis(z=0, y=0, x=0): + ''' Return angle, axis corresponding to these Euler angles + + Uses the z, then y, then x convention above + + Parameters + ---------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Returns + ------- + theta : scalar + angle of rotation + vector : array shape (3,) + axis around which rotation occurs + + Examples + -------- + >>> theta, vec = euler2angle_axis(0, 1.5, 0) + >>> print(theta) + 1.5 + >>> np.allclose(vec, [0, 1, 0]) + True + ''' + # delayed import to avoid cyclic dependencies + import nibabel.quaternions as nq + return nq.quat2angle_axis(euler2quat(z, y, x)) + + +def angle_axis2euler(theta, vector, is_normalized=False): + ''' Convert angle, axis pair to Euler angles + + Parameters + ---------- + theta : scalar + angle of rotation + vector : 3 element sequence + vector specifying axis for rotation. + is_normalized : bool, optional + True if vector is already normalized (has norm of 1). Default + False + + Returns + ------- + z : scalar + y : scalar + x : scalar + Rotations in radians around z, y, x axes, respectively + + Examples + -------- + >>> z, y, x = angle_axis2euler(0, [1, 0, 0]) + >>> np.allclose((z, y, x), 0) + True + + Notes + ----- + It's possible to reduce the amount of calculation a little, by + combining parts of the ``angle_axis2mat`` and ``mat2euler`` + functions, but the reduction in computation is small, and the code + repetition is large. + ''' + # delayed import to avoid cyclic dependencies + import nibabel.quaternions as nq + M = nq.angle_axis2mat(theta, vector, is_normalized) + return mat2euler(M) diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py new file mode 100644 index 000000000..c23728ef5 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/pc_util.py @@ -0,0 +1,227 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+""" Utility functions for processing point clouds.
+
+Author: Charles R. Qi, Hao Su
+Date: November 2016
+"""
+
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+
+# Draw point cloud
+from eulerangles import euler2mat
+
+# Point cloud IO
+import numpy as np
+from plyfile import PlyData, PlyElement
+
+
+# ----------------------------------------
+# Point Cloud/Volume Conversions
+# ----------------------------------------
+
+def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
+    """ Input is BxNx3 batch of point cloud
+        Output is Bx(vsize^3)
+    """
+    vol_list = []
+    for b in range(point_clouds.shape[0]):
+        vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
+        if flatten:
+            vol_list.append(vol.flatten())
+        else:
+            vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
+    if flatten:
+        return np.vstack(vol_list)
+    else:
+        return np.concatenate(vol_list, 0)
+
+
+def point_cloud_to_volume(points, vsize, radius=1.0):
+    """ input is Nx3 points.
+        output is vsize*vsize*vsize
+        assumes points are in range [-radius, radius]
+    """
+    vol = np.zeros((vsize,vsize,vsize))
+    voxel = 2*radius/float(vsize)
+    locations = (points + radius)/voxel
+    locations = locations.astype(int)
+    vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
+    return vol
+
+#a = np.zeros((16,1024,3))
+#print(point_cloud_to_volume_batch(a, 12, 1.0, False).shape)
+
+def volume_to_point_cloud(vol):
+    """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
+        return Nx3 numpy array.
+    """
+    vsize = vol.shape[0]
+    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
+    points = []
+    for a in range(vsize):
+        for b in range(vsize):
+            for c in range(vsize):
+                if vol[a,b,c] == 1:
+                    points.append(np.array([a,b,c]))
+    if len(points) == 0:
+        return np.zeros((0,3))
+    points = np.vstack(points)
+    return points
+
+# ----------------------------------------
+# Point cloud IO
+# ----------------------------------------
+
+def read_ply(filename):
+    """ read XYZ point cloud from filename PLY file """
+    plydata = PlyData.read(filename)
+    pc = plydata['vertex'].data
+    pc_array = np.array([[x, y, z] for x,y,z in pc])
+    return pc_array
+
+
+def write_ply(points, filename, text=True):
+    """ input: Nx3, write points to filename as PLY format. 
""" + points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])] + vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')]) + el = PlyElement.describe(vertex, 'vertex', comments=['vertices']) + PlyData([el], text=text).write(filename) + + +# ---------------------------------------- +# Simple Point cloud and Volume Renderers +# ---------------------------------------- + +def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25, + xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True): + """ Render point cloud to image with alpha channel. + Input: + points: Nx3 numpy array (+y is up direction) + Output: + gray image as numpy array of size canvasSizexcanvasSize + """ + image = np.zeros((canvasSize, canvasSize)) + if input_points is None or input_points.shape[0] == 0: + return image + + points = input_points[:, switch_xyz] + M = euler2mat(zrot, yrot, xrot) + points = (np.dot(M, points.transpose())).transpose() + + # Normalize the point cloud + # We normalize scale to fit points in a unit sphere + if normalize: + centroid = np.mean(points, axis=0) + points -= centroid + furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1))) + points /= furthest_distance + + # Pre-compute the Gaussian disk + radius = (diameter-1)/2.0 + disk = np.zeros((diameter, diameter)) + for i in range(diameter): + for j in range(diameter): + if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius: + disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2)) + mask = np.argwhere(disk > 0) + dx = mask[:, 0] + dy = mask[:, 1] + dv = disk[disk > 0] + + # Order points by z-buffer + zorder = np.argsort(points[:, 2]) + points = points[zorder, :] + points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2]))) + max_depth = np.max(points[:, 2]) + + for i in range(points.shape[0]): + j = points.shape[0] - i - 1 + x = points[j, 0] + y = points[j, 1] + xc = canvasSize/2 + (x*space) + yc = canvasSize/2 + (y*space) + xc = int(np.round(xc)) + yc = int(np.round(yc)) + + px = dx + xc + py = dy + yc + + image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3 + + image = image / np.max(image) + return image + +def point_cloud_three_views(points): + """ input points Nx3 numpy array (+y is up direction). + return an numpy array gray image of size 500x1500. 
""" + # +y is up direction + # xrot is azimuth + # yrot is in-plane + # zrot is elevation + img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi) + img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi) + img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi) + image_large = np.concatenate([img1, img2, img3], 1) + return image_large + + +from PIL import Image +def point_cloud_three_views_demo(): + """ Demo for draw_point_cloud function """ + points = read_ply('../third_party/mesh_sampling/piano.ply') + im_array = point_cloud_three_views(points) + img = Image.fromarray(np.uint8(im_array*255.0)) + img.save('piano.jpg') + +if __name__=="__main__": + point_cloud_three_views_demo() + + +import matplotlib.pyplot as plt +def pyplot_draw_point_cloud(points, output_filename): + """ points is a Nx3 numpy array """ + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + ax.scatter(points[:,0], points[:,1], points[:,2]) + ax.set_xlabel('x') + ax.set_ylabel('y') + ax.set_zlabel('z') + #savefig(output_filename) + +def pyplot_draw_volume(vol, output_filename): + """ vol is of size vsize*vsize*vsize + output an image to output_filename + """ + points = volume_to_point_cloud(vol) + pyplot_draw_point_cloud(points, output_filename) diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py new file mode 100644 index 000000000..206a2c3ca --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/plyfile.py @@ -0,0 +1,932 @@ +# Copyright 2014 Darsh Ranjan +# +# This file is part of python-plyfile. +# +# python-plyfile is free software: you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# python-plyfile is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# along with python-plyfile. If not, see +# . 
+ +from itertools import islice as _islice + +import numpy as _np +from sys import byteorder as _byteorder + + +try: + _range = range +except NameError: + _range = range + + +# Many-many relation +_data_type_relation = [ + ('int8', 'i1'), + ('char', 'i1'), + ('uint8', 'u1'), + ('uchar', 'b1'), + ('uchar', 'u1'), + ('int16', 'i2'), + ('short', 'i2'), + ('uint16', 'u2'), + ('ushort', 'u2'), + ('int32', 'i4'), + ('int', 'i4'), + ('uint32', 'u4'), + ('uint', 'u4'), + ('float32', 'f4'), + ('float', 'f4'), + ('float64', 'f8'), + ('double', 'f8') +] + +_data_types = dict(_data_type_relation) +_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation) + +_types_list = [] +_types_set = set() +for (_a, _b) in _data_type_relation: + if _a not in _types_set: + _types_list.append(_a) + _types_set.add(_a) + if _b not in _types_set: + _types_list.append(_b) + _types_set.add(_b) + + +_byte_order_map = { + 'ascii': '=', + 'binary_little_endian': '<', + 'binary_big_endian': '>' +} + +_byte_order_reverse = { + '<': 'binary_little_endian', + '>': 'binary_big_endian' +} + +_native_byte_order = {'little': '<', 'big': '>'}[_byteorder] + + +def _lookup_type(type_str): + if type_str not in _data_type_reverse: + try: + type_str = _data_types[type_str] + except KeyError: + raise ValueError("field type %r not in %r" % + (type_str, _types_list)) + + return _data_type_reverse[type_str] + + +def _split_line(line, n): + fields = line.split(None, n) + if len(fields) == n: + fields.append('') + + assert len(fields) == n + 1 + + return fields + + +def make2d(array, cols=None, dtype=None): + ''' + Make a 2D array from an array of arrays. The `cols' and `dtype' + arguments can be omitted if the array is not empty. + + ''' + if (cols is None or dtype is None) and not len(array): + raise RuntimeError("cols and dtype must be specified for empty " + "array") + + if cols is None: + cols = len(array[0]) + + if dtype is None: + dtype = array[0].dtype + + return _np.fromiter(array, [('_', dtype, (cols,))], + count=len(array))['_'] + + +class PlyParseError(Exception): + + ''' + Raised when a PLY file cannot be parsed. + + The attributes `element', `row', `property', and `message' give + additional information. + + ''' + + def __init__(self, message, element=None, row=None, prop=None): + self.message = message + self.element = element + self.row = row + self.prop = prop + + s = '' + if self.element: + s += 'element %r: ' % self.element.name + if self.row is not None: + s += 'row %d: ' % self.row + if self.prop: + s += 'property %r: ' % self.prop.name + s += self.message + + Exception.__init__(self, s) + + def __repr__(self): + return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' % + self.message, self.element, self.row, self.prop) + + +class PlyData(object): + + ''' + PLY file header and data. + + A PlyData instance is created in one of two ways: by the static + method PlyData.read (to read a PLY file), or directly from __init__ + given a sequence of elements (which can then be written to a PLY + file). + + ''' + + def __init__(self, elements=[], text=False, byte_order='=', + comments=[], obj_info=[]): + ''' + elements: sequence of PlyElement instances. + + text: whether the resulting PLY file will be text (True) or + binary (False). + + byte_order: '<' for little-endian, '>' for big-endian, or '=' + for native. This is only relevant if `text' is False. + + comments: sequence of strings that will be placed in the header + between the 'ply' and 'format ...' lines. 
+ + obj_info: like comments, but will be placed in the header with + "obj_info ..." instead of "comment ...". + + ''' + if byte_order == '=' and not text: + byte_order = _native_byte_order + + self.byte_order = byte_order + self.text = text + + self.comments = list(comments) + self.obj_info = list(obj_info) + self.elements = elements + + def _get_elements(self): + return self._elements + + def _set_elements(self, elements): + self._elements = tuple(elements) + self._index() + + elements = property(_get_elements, _set_elements) + + def _get_byte_order(self): + return self._byte_order + + def _set_byte_order(self, byte_order): + if byte_order not in ['<', '>', '=']: + raise ValueError("byte order must be '<', '>', or '='") + + self._byte_order = byte_order + + byte_order = property(_get_byte_order, _set_byte_order) + + def _index(self): + self._element_lookup = dict((elt.name, elt) for elt in + self._elements) + if len(self._element_lookup) != len(self._elements): + raise ValueError("two elements with same name") + + @staticmethod + def _parse_header(stream): + ''' + Parse a PLY header from a readable file-like stream. + + ''' + lines = [] + comments = {'comment': [], 'obj_info': []} + while True: + line = stream.readline().decode('ascii').strip() + fields = _split_line(line, 1) + + if fields[0] == 'end_header': + break + + elif fields[0] in comments.keys(): + lines.append(fields) + else: + lines.append(line.split()) + + a = 0 + if lines[a] != ['ply']: + raise PlyParseError("expected 'ply'") + + a += 1 + while lines[a][0] in comments.keys(): + comments[lines[a][0]].append(lines[a][1]) + a += 1 + + if lines[a][0] != 'format': + raise PlyParseError("expected 'format'") + + if lines[a][2] != '1.0': + raise PlyParseError("expected version '1.0'") + + if len(lines[a]) != 3: + raise PlyParseError("too many fields after 'format'") + + fmt = lines[a][1] + + if fmt not in _byte_order_map: + raise PlyParseError("don't understand format %r" % fmt) + + byte_order = _byte_order_map[fmt] + text = fmt == 'ascii' + + a += 1 + while a < len(lines) and lines[a][0] in comments.keys(): + comments[lines[a][0]].append(lines[a][1]) + a += 1 + + return PlyData(PlyElement._parse_multi(lines[a:]), + text, byte_order, + comments['comment'], comments['obj_info']) + + @staticmethod + def read(stream): + ''' + Read PLY data from a readable file-like object or filename. + + ''' + (must_close, stream) = _open_stream(stream, 'read') + try: + data = PlyData._parse_header(stream) + for elt in data: + elt._read(stream, data.text, data.byte_order) + finally: + if must_close: + stream.close() + + return data + + def write(self, stream): + ''' + Write PLY data to a writeable file-like object or filename. + + ''' + (must_close, stream) = _open_stream(stream, 'write') + try: + stream.write(self.header.encode('ascii')) + stream.write(b'\r\n') + for elt in self: + elt._write(stream, self.text, self.byte_order) + finally: + if must_close: + stream.close() + + @property + def header(self): + ''' + Provide PLY-formatted metadata for the instance. + + ''' + lines = ['ply'] + + if self.text: + lines.append('format ascii 1.0') + else: + lines.append('format ' + + _byte_order_reverse[self.byte_order] + + ' 1.0') + + # Some information is lost here, since all comments are placed + # between the 'format' line and the first element. 
+ for c in self.comments: + lines.append('comment ' + c) + + for c in self.obj_info: + lines.append('obj_info ' + c) + + lines.extend(elt.header for elt in self.elements) + lines.append('end_header') + return '\r\n'.join(lines) + + def __iter__(self): + return iter(self.elements) + + def __len__(self): + return len(self.elements) + + def __contains__(self, name): + return name in self._element_lookup + + def __getitem__(self, name): + return self._element_lookup[name] + + def __str__(self): + return self.header + + def __repr__(self): + return ('PlyData(%r, text=%r, byte_order=%r, ' + 'comments=%r, obj_info=%r)' % + (self.elements, self.text, self.byte_order, + self.comments, self.obj_info)) + + +def _open_stream(stream, read_or_write): + if hasattr(stream, read_or_write): + return (False, stream) + try: + return (True, open(stream, read_or_write[0] + 'b')) + except TypeError: + raise RuntimeError("expected open file or filename") + + +class PlyElement(object): + + ''' + PLY file element. + + A client of this library doesn't normally need to instantiate this + directly, so the following is only for the sake of documenting the + internals. + + Creating a PlyElement instance is generally done in one of two ways: + as a byproduct of PlyData.read (when reading a PLY file) and by + PlyElement.describe (before writing a PLY file). + + ''' + + def __init__(self, name, properties, count, comments=[]): + ''' + This is not part of the public interface. The preferred methods + of obtaining PlyElement instances are PlyData.read (to read from + a file) and PlyElement.describe (to construct from a numpy + array). + + ''' + self._name = str(name) + self._check_name() + self._count = count + + self._properties = tuple(properties) + self._index() + + self.comments = list(comments) + + self._have_list = any(isinstance(p, PlyListProperty) + for p in self.properties) + + @property + def count(self): + return self._count + + def _get_data(self): + return self._data + + def _set_data(self, data): + self._data = data + self._count = len(data) + self._check_sanity() + + data = property(_get_data, _set_data) + + def _check_sanity(self): + for prop in self.properties: + if prop.name not in self._data.dtype.fields: + raise ValueError("dangling property %r" % prop.name) + + def _get_properties(self): + return self._properties + + def _set_properties(self, properties): + self._properties = tuple(properties) + self._check_sanity() + self._index() + + properties = property(_get_properties, _set_properties) + + def _index(self): + self._property_lookup = dict((prop.name, prop) + for prop in self._properties) + if len(self._property_lookup) != len(self._properties): + raise ValueError("two properties with same name") + + def ply_property(self, name): + return self._property_lookup[name] + + @property + def name(self): + return self._name + + def _check_name(self): + if any(c.isspace() for c in self._name): + msg = "element name %r contains spaces" % self._name + raise ValueError(msg) + + def dtype(self, byte_order='='): + ''' + Return the numpy dtype of the in-memory representation of the + data. (If there are no list properties, and the PLY format is + binary, then this also accurately describes the on-disk + representation of the element.) + + ''' + return [(prop.name, prop.dtype(byte_order)) + for prop in self.properties] + + @staticmethod + def _parse_multi(header_lines): + ''' + Parse a list of PLY element definitions. 
+ + ''' + elements = [] + while header_lines: + (elt, header_lines) = PlyElement._parse_one(header_lines) + elements.append(elt) + + return elements + + @staticmethod + def _parse_one(lines): + ''' + Consume one element definition. The unconsumed input is + returned along with a PlyElement instance. + + ''' + a = 0 + line = lines[a] + + if line[0] != 'element': + raise PlyParseError("expected 'element'") + if len(line) > 3: + raise PlyParseError("too many fields after 'element'") + if len(line) < 3: + raise PlyParseError("too few fields after 'element'") + + (name, count) = (line[1], int(line[2])) + + comments = [] + properties = [] + while True: + a += 1 + if a >= len(lines): + break + + if lines[a][0] == 'comment': + comments.append(lines[a][1]) + elif lines[a][0] == 'property': + properties.append(PlyProperty._parse_one(lines[a])) + else: + break + + return (PlyElement(name, properties, count, comments), + lines[a:]) + + @staticmethod + def describe(data, name, len_types={}, val_types={}, + comments=[]): + ''' + Construct a PlyElement from an array's metadata. + + len_types and val_types can be given as mappings from list + property names to type strings (like 'u1', 'f4', etc., or + 'int8', 'float32', etc.). These can be used to define the length + and value types of list properties. List property lengths + always default to type 'u1' (8-bit unsigned integer), and value + types default to 'i4' (32-bit integer). + + ''' + if not isinstance(data, _np.ndarray): + raise TypeError("only numpy arrays are supported") + + if len(data.shape) != 1: + raise ValueError("only one-dimensional arrays are " + "supported") + + count = len(data) + + properties = [] + descr = data.dtype.descr + + for t in descr: + if not isinstance(t[1], str): + raise ValueError("nested records not supported") + + if not t[0]: + raise ValueError("field with empty name") + + if len(t) != 2 or t[1][1] == 'O': + # non-scalar field, which corresponds to a list + # property in PLY. + + if t[1][1] == 'O': + if len(t) != 2: + raise ValueError("non-scalar object fields not " + "supported") + + len_str = _data_type_reverse[len_types.get(t[0], 'u1')] + if t[1][1] == 'O': + val_type = val_types.get(t[0], 'i4') + val_str = _lookup_type(val_type) + else: + val_str = _lookup_type(t[1][1:]) + + prop = PlyListProperty(t[0], len_str, val_str) + else: + val_str = _lookup_type(t[1][1:]) + prop = PlyProperty(t[0], val_str) + + properties.append(prop) + + elt = PlyElement(name, properties, count, comments) + elt.data = data + + return elt + + def _read(self, stream, text, byte_order): + ''' + Read the actual data from a PLY file. + + ''' + if text: + self._read_txt(stream) + else: + if self._have_list: + # There are list properties, so a simple load is + # impossible. + self._read_bin(stream, byte_order) + else: + # There are no list properties, so loading the data is + # much more straightforward. + self._data = _np.fromfile(stream, + self.dtype(byte_order), + self.count) + + if len(self._data) < self.count: + k = len(self._data) + del self._data + raise PlyParseError("early end-of-file", self, k) + + self._check_sanity() + + def _write(self, stream, text, byte_order): + ''' + Write the data to a PLY file. + + ''' + if text: + self._write_txt(stream) + else: + if self._have_list: + # There are list properties, so serialization is + # slightly complicated. + self._write_bin(stream, byte_order) + else: + # no list properties, so serialization is + # straightforward. 
+ self.data.astype(self.dtype(byte_order), + copy=False).tofile(stream) + + def _read_txt(self, stream): + ''' + Load a PLY element from an ASCII-format PLY file. The element + may contain list properties. + + ''' + self._data = _np.empty(self.count, dtype=self.dtype()) + + k = 0 + for line in _islice(iter(stream.readline, b''), self.count): + fields = iter(line.strip().split()) + for prop in self.properties: + try: + self._data[prop.name][k] = prop._from_fields(fields) + except StopIteration: + raise PlyParseError("early end-of-line", + self, k, prop) + except ValueError: + raise PlyParseError("malformed input", + self, k, prop) + try: + next(fields) + except StopIteration: + pass + else: + raise PlyParseError("expected end-of-line", self, k) + k += 1 + + if k < self.count: + del self._data + raise PlyParseError("early end-of-file", self, k) + + def _write_txt(self, stream): + ''' + Save a PLY element to an ASCII-format PLY file. The element may + contain list properties. + + ''' + for rec in self.data: + fields = [] + for prop in self.properties: + fields.extend(prop._to_fields(rec[prop.name])) + + _np.savetxt(stream, [fields], '%.18g', newline='\r\n') + + def _read_bin(self, stream, byte_order): + ''' + Load a PLY element from a binary PLY file. The element may + contain list properties. + + ''' + self._data = _np.empty(self.count, dtype=self.dtype(byte_order)) + + for k in _range(self.count): + for prop in self.properties: + try: + self._data[prop.name][k] = \ + prop._read_bin(stream, byte_order) + except StopIteration: + raise PlyParseError("early end-of-file", + self, k, prop) + + def _write_bin(self, stream, byte_order): + ''' + Save a PLY element to a binary PLY file. The element may + contain list properties. + + ''' + for rec in self.data: + for prop in self.properties: + prop._write_bin(rec[prop.name], stream, byte_order) + + @property + def header(self): + ''' + Format this element's metadata as it would appear in a PLY + header. + + ''' + lines = ['element %s %d' % (self.name, self.count)] + + # Some information is lost here, since all comments are placed + # between the 'element' line and the first property definition. + for c in self.comments: + lines.append('comment ' + c) + + lines.extend(list(map(str, self.properties))) + + return '\r\n'.join(lines) + + def __getitem__(self, key): + return self.data[key] + + def __setitem__(self, key, value): + self.data[key] = value + + def __str__(self): + return self.header + + def __repr__(self): + return ('PlyElement(%r, %r, count=%d, comments=%r)' % + (self.name, self.properties, self.count, + self.comments)) + + +class PlyProperty(object): + + ''' + PLY property description. This class is pure metadata; the data + itself is contained in PlyElement instances. 
+ + ''' + + def __init__(self, name, val_dtype): + self._name = str(name) + self._check_name() + self.val_dtype = val_dtype + + def _get_val_dtype(self): + return self._val_dtype + + def _set_val_dtype(self, val_dtype): + self._val_dtype = _data_types[_lookup_type(val_dtype)] + + val_dtype = property(_get_val_dtype, _set_val_dtype) + + @property + def name(self): + return self._name + + def _check_name(self): + if any(c.isspace() for c in self._name): + msg = "Error: property name %r contains spaces" % self._name + raise RuntimeError(msg) + + @staticmethod + def _parse_one(line): + assert line[0] == 'property' + + if line[1] == 'list': + if len(line) > 5: + raise PlyParseError("too many fields after " + "'property list'") + if len(line) < 5: + raise PlyParseError("too few fields after " + "'property list'") + + return PlyListProperty(line[4], line[2], line[3]) + + else: + if len(line) > 3: + raise PlyParseError("too many fields after " + "'property'") + if len(line) < 3: + raise PlyParseError("too few fields after " + "'property'") + + return PlyProperty(line[2], line[1]) + + def dtype(self, byte_order='='): + ''' + Return the numpy dtype description for this property (as a tuple + of strings). + + ''' + return byte_order + self.val_dtype + + def _from_fields(self, fields): + ''' + Parse from generator. Raise StopIteration if the property could + not be read. + + ''' + return _np.dtype(self.dtype()).type(next(fields)) + + def _to_fields(self, data): + ''' + Return generator over one item. + + ''' + yield _np.dtype(self.dtype()).type(data) + + def _read_bin(self, stream, byte_order): + ''' + Read data from a binary stream. Raise StopIteration if the + property could not be read. + + ''' + try: + return _np.fromfile(stream, self.dtype(byte_order), 1)[0] + except IndexError: + raise StopIteration + + def _write_bin(self, data, stream, byte_order): + ''' + Write data to a binary stream. + + ''' + _np.dtype(self.dtype(byte_order)).type(data).tofile(stream) + + def __str__(self): + val_str = _data_type_reverse[self.val_dtype] + return 'property %s %s' % (val_str, self.name) + + def __repr__(self): + return 'PlyProperty(%r, %r)' % (self.name, + _lookup_type(self.val_dtype)) + + +class PlyListProperty(PlyProperty): + + ''' + PLY list property description. + + ''' + + def __init__(self, name, len_dtype, val_dtype): + PlyProperty.__init__(self, name, val_dtype) + + self.len_dtype = len_dtype + + def _get_len_dtype(self): + return self._len_dtype + + def _set_len_dtype(self, len_dtype): + self._len_dtype = _data_types[_lookup_type(len_dtype)] + + len_dtype = property(_get_len_dtype, _set_len_dtype) + + def dtype(self, byte_order='='): + ''' + List properties always have a numpy dtype of "object". + + ''' + return '|O' + + def list_dtype(self, byte_order='='): + ''' + Return the pair (len_dtype, val_dtype) (both numpy-friendly + strings). + + ''' + return (byte_order + self.len_dtype, + byte_order + self.val_dtype) + + def _from_fields(self, fields): + (len_t, val_t) = self.list_dtype() + + n = int(_np.dtype(len_t).type(next(fields))) + + data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1) + if len(data) < n: + raise StopIteration + + return data + + def _to_fields(self, data): + ''' + Return generator over the (numerical) PLY representation of the + list data (length followed by actual data). 
+ + ''' + (len_t, val_t) = self.list_dtype() + + data = _np.asarray(data, dtype=val_t).ravel() + + yield _np.dtype(len_t).type(data.size) + for x in data: + yield x + + def _read_bin(self, stream, byte_order): + (len_t, val_t) = self.list_dtype(byte_order) + + try: + n = _np.fromfile(stream, len_t, 1)[0] + except IndexError: + raise StopIteration + + data = _np.fromfile(stream, val_t, n) + if len(data) < n: + raise StopIteration + + return data + + def _write_bin(self, data, stream, byte_order): + ''' + Write data to a binary stream. + + ''' + (len_t, val_t) = self.list_dtype(byte_order) + + data = _np.asarray(data, dtype=val_t).ravel() + + _np.array(data.size, dtype=len_t).tofile(stream) + data.tofile(stream) + + def __str__(self): + len_str = _data_type_reverse[self.len_dtype] + val_str = _data_type_reverse[self.val_dtype] + return 'property list %s %s %s' % (len_str, val_str, self.name) + + def __repr__(self): + return ('PlyListProperty(%r, %r, %r)' % + (self.name, + _lookup_type(self.len_dtype), + _lookup_type(self.val_dtype))) diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py new file mode 100644 index 000000000..4ac248baa --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/utils/tf_util.py @@ -0,0 +1,614 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" Wrapper functions for TensorFlow layers. + +Author: Charles R. Qi +Date: November 2016 +""" +import numpy as np +import tensorflow as tf + +# 建立CPU实例:name 变量名,shape 纬度的整形数字列表,initializer初始化变量,返回张量 +def _variable_on_cpu(name, shape, initializer, use_fp16=False): + """Helper to create a Variable stored on CPU memory. + Args: + name: name of the variable + shape: list of ints + initializer: initializer for Variable + Returns: + Variable Tensor + """ + with tf.device('/cpu:0'): + dtype = tf.float16 if use_fp16 else tf.float32 + var = tf.compat.v1.get_variable(name, shape, initializer=initializer, dtype=dtype) + return var + + +# 随权重衰减变化的变量 +def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True): + """Helper to create an initialized Variable with weight decay. 
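+    (When wd is given, wd * l2_loss(var) is added to the 'losses' collection
+    under the name 'weight_loss', to be summed into the total loss.)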
+ + Note that the Variable is initialized with a truncated normal distribution. + A weight decay is added only if one is specified. + + Args: + name: name of the variable + shape: list of ints + stddev: standard deviation of a truncated Gaussian + wd: add L2Loss weight decay multiplied by this float. If None, weight + decay is not added for this Variable. + use_xavier: bool, whether to use xavier initializer + + Returns: + Variable Tensor + """ + if use_xavier: + initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform") + else: + initializer = tf.compat.v1.truncated_normal_initializer(stddev=stddev) + var = _variable_on_cpu(name, shape, initializer) + if wd is not None: + weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') + tf.compat.v1.add_to_collection('losses', weight_decay) + return var + + +# 一维卷积层 +def conv1d(inputs, + num_output_channels, + kernel_size, + scope, + stride=1, + padding='SAME', + use_xavier=True, + stddev=1e-3, + weight_decay=0.0, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 1D convolution with non-linear operation. + + Args: + inputs: 3-D tensor variable BxLxC + num_output_channels: int + kernel_size: int + scope: string + stride: int + padding: 'SAME' or 'VALID' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + num_in_channels = inputs.get_shape()[-1] + kernel_shape = [kernel_size, + num_in_channels, num_output_channels] + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + outputs = tf.nn.conv1d(input=inputs, filters=kernel, + stride=stride, + padding=padding) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.compat.v1.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_conv1d(outputs, is_training, + bn_decay=bn_decay, scope='bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +# 二维卷积层 +def conv2d(inputs, + num_output_channels, + kernel_size, + scope, + stride=[1, 1], + padding='SAME', + use_xavier=True, + stddev=1e-3, + weight_decay=0.0, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 2D convolution with non-linear operation. 
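+    (Applies tf.nn.conv2d with a [kernel_h, kernel_w, C_in, C_out] kernel,
+    adds a bias, then optionally batch norm and the activation function.)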
+ + Args: + inputs: 4-D tensor variable BxHxWxC + num_output_channels: int + kernel_size: a list of 2 ints + scope: string + stride: a list of 2 ints + padding: 'SAME' or 'VALID' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + num_in_channels = inputs.get_shape()[-1] + kernel_shape = [kernel_h, kernel_w, + num_in_channels, num_output_channels] + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + stride_h, stride_w = stride + outputs = tf.nn.conv2d(input=inputs, filters=kernel, + strides=[1, stride_h, stride_w, 1], + padding=padding) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.compat.v1.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_conv2d(outputs, is_training, + bn_decay=bn_decay, scope='bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +def conv2d_transpose(inputs, + num_output_channels, + kernel_size, + scope, + stride=[1, 1], + padding='SAME', + use_xavier=True, + stddev=1e-3, + weight_decay=0.0, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 2D convolution transpose with non-linear operation. + + Args: + inputs: 4-D tensor variable BxHxWxC + num_output_channels: int + kernel_size: a list of 2 ints + scope: string + stride: a list of 2 ints + padding: 'SAME' or 'VALID' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + + Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + num_in_channels = inputs.get_shape()[-1] + kernel_shape = [kernel_h, kernel_w, + num_output_channels, num_in_channels] # reversed to conv2d + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + stride_h, stride_w = stride + + # from slim.convolution2d_transpose + def get_deconv_dim(dim_size, stride_size, kernel_size, padding): + dim_size *= stride_size + + if padding == 'VALID' and dim_size is not None: + dim_size += max(kernel_size - stride_size, 0) + return dim_size + + # caculate output shape + batch_size = inputs.get_shape()[0] + height = inputs.get_shape()[1] + width = inputs.get_shape()[2] + out_height = get_deconv_dim(height, stride_h, kernel_h, padding) + out_width = get_deconv_dim(width, stride_w, kernel_w, padding) + output_shape = [batch_size, out_height, out_width, num_output_channels] + + outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape, + [1, stride_h, stride_w, 1], + padding=padding) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.compat.v1.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_conv2d(outputs, is_training, + bn_decay=bn_decay, scope='bn') + + if 
activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +# 三维卷积层 +def conv3d(inputs, + num_output_channels, + kernel_size, + scope, + stride=[1, 1, 1], + padding='SAME', + use_xavier=True, + stddev=1e-3, + weight_decay=0.0, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 3D convolution with non-linear operation. + + Args: + inputs: 5-D tensor variable BxDxHxWxC + num_output_channels: int + kernel_size: a list of 3 ints + scope: string + stride: a list of 3 ints + padding: 'SAME' or 'VALID' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_d, kernel_h, kernel_w = kernel_size + num_in_channels = inputs.get_shape()[-1] + kernel_shape = [kernel_d, kernel_h, kernel_w, + num_in_channels, num_output_channels] + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + stride_d, stride_h, stride_w = stride + outputs = tf.nn.conv3d(inputs, kernel, + [1, stride_d, stride_h, stride_w, 1], + padding=padding) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.compat.v1.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_conv3d(outputs, is_training, + bn_decay=bn_decay, scope='bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +# 全连接网络 +def fully_connected(inputs, + num_outputs, + scope, + use_xavier=True, + stddev=1e-3, + weight_decay=0.0, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ Fully connected layer with non-linear operation. + + Args: + inputs: 2-D tensor BxN + num_outputs: int + + Returns: + Variable tensor of size B x num_outputs. + """ + with tf.compat.v1.variable_scope(scope) as sc: + num_input_units = inputs.get_shape()[-1] + weights = _variable_with_weight_decay('weights', + shape=[num_input_units, num_outputs], + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + outputs = tf.matmul(inputs, weights) + biases = _variable_on_cpu('biases', [num_outputs], + tf.compat.v1.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +# 2D最大值池化(2X2) +def max_pool2d(inputs, + kernel_size, + scope, + stride=[2, 2], + padding='VALID'): + """ 2D max pooling. + + Args: + inputs: 4-D tensor BxHxWxC + kernel_size: a list of 2 ints + stride: a list of 2 ints + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + stride_h, stride_w = stride + outputs = tf.nn.max_pool2d(input=inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + + +# 2D均值池化(2X2) +def avg_pool2d(inputs, + kernel_size, + scope, + stride=[2, 2], + padding='VALID'): + """ 2D avg pooling. 
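+    (With the default stride [2, 2] and 'VALID' padding, a 2x2 kernel
+    halves H and W, rounding down.)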
+ + Args: + inputs: 4-D tensor BxHxWxC + kernel_size: a list of 2 ints + stride: a list of 2 ints + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + stride_h, stride_w = stride + outputs = tf.nn.avg_pool2d(input=inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + + +# 3D最大值池化(2X2X2) +def max_pool3d(inputs, + kernel_size, + scope, + stride=[2, 2, 2], + padding='VALID'): + """ 3D max pooling. + + Args: + inputs: 5-D tensor BxDxHxWxC + kernel_size: a list of 3 ints + stride: a list of 3 ints + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_d, kernel_h, kernel_w = kernel_size + stride_d, stride_h, stride_w = stride + outputs = tf.nn.max_pool3d(inputs, + ksize=[1, kernel_d, kernel_h, kernel_w, 1], + strides=[1, stride_d, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + + +# 3D均值池化(2X2X2) +def avg_pool3d(inputs, + kernel_size, + scope, + stride=[2, 2, 2], + padding='VALID'): + """ 3D avg pooling. + + Args: + inputs: 5-D tensor BxDxHxWxC + kernel_size: a list of 3 ints + stride: a list of 3 ints + + Returns: + Variable tensor + """ + with tf.compat.v1.variable_scope(scope) as sc: + kernel_d, kernel_h, kernel_w = kernel_size + stride_d, stride_h, stride_w = stride + outputs = tf.nn.avg_pool3d(inputs, + ksize=[1, kernel_d, kernel_h, kernel_w, 1], + strides=[1, stride_d, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + + +# 批量归一化(模版) +def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay): + """ Batch normalization on convolutional maps and beyond... + Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow + + Args: + inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC + is_training: boolean tf.Varialbe, true indicates training phase + scope: string, variable scope + moments_dims: a list of ints, indicating dimensions for moments calculation + bn_decay: float or float tensor variable, controling moving average weight + Return: + normed: batch-normalized maps + """ + with tf.compat.v1.variable_scope(scope) as sc: + num_channels = inputs.get_shape()[-1] + beta = tf.Variable(tf.constant(0.0, shape=[num_channels]), + name='beta', trainable=True) + gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]), + name='gamma', trainable=True) + batch_mean, batch_var = tf.nn.moments(x=inputs, axes=moments_dims, name='moments') + decay = bn_decay if bn_decay is not None else 0.9 + ema = tf.train.ExponentialMovingAverage(decay=decay) + # Operator that maintains moving averages of variables. + ema_apply_op = tf.cond(pred=is_training, + true_fn=lambda: ema.apply([batch_mean, batch_var]), + false_fn=lambda: tf.no_op()) + + # Update moving average and return current batch's avg and var. + def mean_var_with_update(): + with tf.control_dependencies([ema_apply_op]): + return tf.identity(batch_mean), tf.identity(batch_var) + + # ema.average returns the Variable holding the average of var. + mean, var = tf.cond(pred=is_training, + true_fn=mean_var_with_update, + false_fn=lambda: (ema.average(batch_mean), ema.average(batch_var))) + normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3) + return normed + + +# 批量归一化(模版) +def batch_norm_for_fc(inputs, is_training, bn_decay, scope): + """ Batch normalization on FC data. 
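+    (Moments are computed over axis 0 only, i.e. one mean and variance per
+    feature across the batch.)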
+
+    Args:
+        inputs:      Tensor, 2D BxC input
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    return batch_norm_template(inputs, is_training, scope, [0, ], bn_decay)
+
+
+# Batch normalization for 1D convolutional layers
+def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on 1D convolutional maps.
+
+    Args:
+        inputs:      Tensor, 3D BLC input maps
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay)
+
+
+# Batch normalization for 2D convolutional layers
+def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on 2D convolutional maps.
+
+    Args:
+        inputs:      Tensor, 4D BHWC input maps
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)
+
+
+# Batch normalization for 3D convolutional layers
+def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
+    """ Batch normalization on 3D convolutional maps.
+
+    Args:
+        inputs:      Tensor, 5D BDHWC input maps
+        is_training: boolean tf.Variable, true indicates training phase
+        bn_decay:    float or float tensor variable, controlling moving average weight
+        scope:       string, variable scope
+    Return:
+        normed:      batch-normalized maps
+    """
+    return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
+
+
+# Randomly drop unit activations to reduce overfitting (defaults to 50%)
+def dropout(inputs,
+            is_training,
+            scope,
+            keep_prob=0.5,
+            noise_shape=None):
+    """ Dropout layer.
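+    (Note: tf.nn.dropout takes a *drop* rate, hence the (1 - keep_prob)
+    passed below; inputs are returned unchanged when is_training is False.)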
+ + Args: + inputs: tensor + is_training: boolean tf.Variable + scope: string + keep_prob: float in [0,1] + noise_shape: list of ints + + Returns: + tensor variable + """ + with tf.compat.v1.variable_scope(scope) as sc: + outputs = tf.cond(pred=is_training, + true_fn=lambda: tf.nn.dropout(inputs, (1 - keep_prob), noise_shape), + false_fn=lambda: inputs) + return outputs -- Gitee From 778498fdb2d891cfc68173297a2d8b1ebb36e440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:32:02 +0000 Subject: [PATCH 23/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../test/train_full_1p.sh | 179 +++++++++++++++++ .../test/train_performance_1p.sh | 186 ++++++++++++++++++ 2 files changed, 365 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..3cfc94739 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,179 @@ +#!/bin/bash + +#current path, no revsion +cur_path=`pwd` + +#ENV +#this is necessary for lib.so.103 +#export LD_LIBRARY_PATH=/usr/include/h5py/lib:$LD_LIBRARY_PATH + +#HCCL params, no revision +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=0 + +# 数据集路径,保持为空,不需要修改 +data_path="" +ckpt_path="" +#设置默认日志级别,不需要修改 +#export ASCEND_GLOBAL_LOG_LEVEL=3 +#export ASCEND_GLOBAL_EVENT_ENABLE=1 + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="PointNet_ID2913_for_TensorFlow2.X" +#训练epoch +train_epochs=250 +#训练batch_size +batch_size=32 +#训练step +train_steps=0 +#学习率 +learning_rate=0.0015 +num_point=2048 + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* 
]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $RANK_ID" + export RANK_ID=$RANK_ID + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + #--data_dir, --model_dir, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path,--autotune + nohup python3 ${cur_path}/../train.py \ + --log_dir=${cur_path}/output/$ASCEND_DEVICE_ID/ckpt \ + --num_point=2048 \ + --data_path=${data_path} \ + --batch_size=32 \ + --learning_rate=0.0015 \ + --max_epoch=250 \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + +done +wait + +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +FPS=`grep FPS ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=` grep 'eval accuracy' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#稳定性精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据 +#吞吐量,不需要修改 +ActualFPS=${FPS} +#单迭代训练时长,不需要修改 +TrainingTime=`grep TOTLE_TIME ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '^mean loss' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print $NF}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk '{print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> 
$cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..72e349b70 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,186 @@ +#!/bin/bash + +#current path, no revsion +cur_path=`pwd` + +#ENV +#this is necessary for lib.so.103 +#export LD_LIBRARY_PATH=/usr/include/h5py/lib:$LD_LIBRARY_PATH + +#HCCL params, no revision +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=0 + +# 数据集路径,保持为空,不需要修改 +data_path="" +ckpt_path="" +#设置默认日志级别,不需要修改 +#export ASCEND_GLOBAL_LOG_LEVEL=3 +#export ASCEND_GLOBAL_EVENT_ENABLE=1 + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="PointNet_ID2913_for_TensorFlow2.X" +#训练epoch +train_epochs=1 +#训练batch_size +batch_size=32 +#训练step +train_steps=0 +#学习率 +learning_rate=0.0015 +num_point=2048 + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $RANK_ID" + export RANK_ID=$RANK_ID + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + #--data_dir, --model_dir, --precision_mode, --over_dump, 
--over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path,--autotune + nohup python3 train.py \ + --log_dir=${cur_path}/output/$ASCEND_DEVICE_ID/ckpt \ + --num_point=2048 \ + --data_path=${data_path} \ + --batch_size=32 \ + --learning_rate=0.0015 \ + --max_epoch=10 \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + +done +wait + +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +# epoch_sec=`grep -a 'epoch time: ' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $3}'` + +# FPS=`awk -v x=1 -v y="$epoch_sec" 'BEGIN{printf "%.2f\n",x/y}'` +# shapes=`awk -v x=5 -v y=2048 'BEGIN{printf "%.2f\n",x*y}'` +# FPS=`awk -v x="$shapes" -v y="$FPS" 'BEGIN{printf "%.2f\n",x*y}'` +FPS=`grep FPS ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +# train_accuracy="null" +train_accuracy=` grep 'eval accuracy' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#稳定性精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据 +#吞吐量,不需要修改 +ActualFPS=${FPS} +#单迭代训练时长,不需要修改 +TrainingTime=`grep TOTLE_TIME ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END{print $NF}'` +# TrainingTime=`awk -v x=320 -v y="$epoch_sec" 'BEGIN{printf "%.3f\n",y/x}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +# grep "loss:" ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk '{print $3}'|sed 's/.$//' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +grep '^mean loss' ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print $NF}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk '{print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> 
$cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From a76e2fa021a627cf6cd038b21744524b9503c2d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:32:19 +0000 Subject: [PATCH 24/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../sem_seg/README.md | 36 + .../sem_seg/batch_inference.py | 201 ++++++ .../sem_seg/collect_indoor3d_data.py | 52 ++ .../sem_seg/download_data.sh | 7 + .../sem_seg/eval_iou_accuracy.py | 68 ++ .../sem_seg/gen_indoor3d_h5.py | 115 ++++ .../sem_seg/indoor3d_util.py | 619 ++++++++++++++++++ .../sem_seg/meta/all_data_label.txt | 272 ++++++++ .../sem_seg/meta/anno_paths.txt | 272 ++++++++ .../sem_seg/meta/area6_data_label.txt | 48 ++ .../sem_seg/meta/class_names.txt | 13 + .../sem_seg/model.py | 106 +++ .../sem_seg/train.py | 306 +++++++++ 13 files changed, 2115 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md new file mode 100644 index 000000000..bc4b48b33 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/README.md @@ -0,0 +1,36 @@ +## Semantic Segmentation of Indoor Scenes + +### Dataset + +Donwload prepared HDF5 data for training: + + sh download_data.sh + +(optional) Download 3D indoor parsing dataset (S3DIS Dataset) for testing and visualization. Version 1.2 of the dataset is used in this work. + + +To prepare your own HDF5 data, you need to firstly download 3D indoor parsing dataset and then use `python collect_indoor3d_data.py` for data re-organization and `python gen_indoor3d_h5.py` to generate HDF5 files. 
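+
+For reference, the end-to-end preparation is roughly the following (a sketch; it assumes the S3DIS archive is unpacked to `data/Stanford3dDataset_v1.2_Aligned_Version`, the location expected by `indoor3d_util.py`):
+
+    sh download_data.sh              # prepared HDF5 blocks (~1.6GB)
+    python collect_indoor3d_data.py  # Annotations/*.txt -> data/stanford_indoor3d/*.npy
+    python gen_indoor3d_h5.py        # .npy rooms -> 4096-point block HDF5 files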
+
+### Training
+
+Once you have downloaded the prepared HDF5 files or generated them yourself, start training with:
+
+    python train.py --log_dir log6 --test_area 6
+
+By default a simple model based on vanilla PointNet is used for training, and Area 6 is used as the test set.
+
+### Testing
+
+Testing requires downloading the 3D indoor parsing data and preprocessing it with `collect_indoor3d_data.py`.
+
+After training, use the `batch_inference.py` script to segment rooms in the test set. In our work we use 6-fold training, which trains 6 models. For model 1, areas 2-6 are used as the train set and area 1 as the test set; for model 2, areas 1 and 3-6 are used as the train set and area 2 as the test set, and so on. Note that the S3DIS dataset paper uses a different 3-fold training, which had not been publicly announced at the time of our work.
+
+For example, to test model 6, use the command:
+
+    python batch_inference.py --model_path log6/model.ckpt --dump_dir log6/dump --output_filelist log6/output_filelist.txt --room_data_filelist meta/area6_data_label.txt --visu
+
+Some OBJ files will be created in `log6/dump` for prediction visualization.
+
+To evaluate overall segmentation accuracy, we evaluate the 6 models on their corresponding test areas and use `eval_iou_accuracy.py` to produce the point classification accuracy and IoU reported in the paper.
+
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py
new file mode 100644
index 000000000..67567f4a6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/batch_inference.py
@@ -0,0 +1,201 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
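+#
+# Example invocation (paths are illustrative; see sem_seg/README.md):
+#   python batch_inference.py --model_path log6/model.ckpt --dump_dir log6/dump \
+#       --output_filelist log6/output_filelist.txt \
+#       --room_data_filelist meta/area6_data_label.txt --visu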
+# +import argparse +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(BASE_DIR) +from model import * +import indoor3d_util + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]') +parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]') +parser.add_argument('--model_path', required=True, help='model checkpoint file path') +parser.add_argument('--dump_dir', required=True, help='dump folder path') +parser.add_argument('--output_filelist', required=True, help='TXT filename, filelist, each line is an output for a room') +parser.add_argument('--room_data_filelist', required=True, help='TXT filename, filelist, each line is a test room data label file.') +parser.add_argument('--no_clutter', action='store_true', help='If true, donot count the clutter class') +parser.add_argument('--visu', action='store_true', help='Whether to output OBJ file for prediction visualization.') +FLAGS = parser.parse_args() + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MODEL_PATH = FLAGS.model_path +GPU_INDEX = FLAGS.gpu +DUMP_DIR = FLAGS.dump_dir +if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR) +LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') +ROOM_PATH_LIST = [os.path.join(ROOT_DIR,line.rstrip()) for line in open(FLAGS.room_data_filelist)] + +NUM_CLASSES = 13 + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + +def evaluate(): + is_training = False + + with tf.device('/gpu:'+str(GPU_INDEX)): + pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + + # simple model + pred = get_model(pointclouds_pl, is_training_pl) + loss = get_loss(pred, labels_pl) + pred_softmax = tf.nn.softmax(pred) + + # Add ops to save and restore all the variables. + saver = tf.compat.v1.train.Saver() + + # Create a session + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = True + sess = tf.compat.v1.Session(config=config) + + # Restore variables from disk. 
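+    # (The checkpoint stores variable values only; they are matched by name
+    # to the graph built above, so the model definition must be identical
+    # to the one used at training time.)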
+ saver.restore(sess, MODEL_PATH) + log_string("Model restored.") + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'pred_softmax': pred_softmax, + 'loss': loss} + + total_correct = 0 + total_seen = 0 + fout_out_filelist = open(FLAGS.output_filelist, 'w') + for room_path in ROOM_PATH_LIST: + out_data_label_filename = os.path.basename(room_path)[:-4] + '_pred.txt' + out_data_label_filename = os.path.join(DUMP_DIR, out_data_label_filename) + out_gt_label_filename = os.path.basename(room_path)[:-4] + '_gt.txt' + out_gt_label_filename = os.path.join(DUMP_DIR, out_gt_label_filename) + print(room_path, out_data_label_filename) + a, b = eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename) + total_correct += a + total_seen += b + fout_out_filelist.write(out_data_label_filename+'\n') + fout_out_filelist.close() + log_string('all room eval accuracy: %f'% (total_correct / float(total_seen))) + +def eval_one_epoch(sess, ops, room_path, out_data_label_filename, out_gt_label_filename): + error_cnt = 0 + is_training = False + total_correct = 0 + total_seen = 0 + loss_sum = 0 + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + if FLAGS.visu: + fout = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_pred.obj'), 'w') + fout_gt = open(os.path.join(DUMP_DIR, os.path.basename(room_path)[:-4]+'_gt.obj'), 'w') + fout_data_label = open(out_data_label_filename, 'w') + fout_gt_label = open(out_gt_label_filename, 'w') + + current_data, current_label = indoor3d_util.room2blocks_wrapper_normalized(room_path, NUM_POINT) + current_data = current_data[:,0:NUM_POINT,:] + current_label = np.squeeze(current_label) + # Get room dimension.. 
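+    # (Columns 0-5 of the room file are XYZRGB; the per-axis maxima computed
+    # below are used to map the normalized coordinates in columns 6-8 of the
+    # block data back to room coordinates for the OBJ output.)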
+ data_label = np.load(room_path) + data = data_label[:,0:6] + max_room_x = max(data[:,0]) + max_room_y = max(data[:,1]) + max_room_z = max(data[:,2]) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + print(file_size) + + + for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + cur_batch_size = end_idx - start_idx + + feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training} + loss_val, pred_val = sess.run([ops['loss'], ops['pred_softmax']], + feed_dict=feed_dict) + + if FLAGS.no_clutter: + pred_label = np.argmax(pred_val[:,:,0:12], 2) # BxN + else: + pred_label = np.argmax(pred_val, 2) # BxN + # Save prediction labels to OBJ file + for b in range(BATCH_SIZE): + pts = current_data[start_idx+b, :, :] + l = current_label[start_idx+b,:] + pts[:,6] *= max_room_x + pts[:,7] *= max_room_y + pts[:,8] *= max_room_z + pts[:,3:6] *= 255.0 + pred = pred_label[b, :] + for i in range(NUM_POINT): + color = indoor3d_util.g_label2color[pred[i]] + color_gt = indoor3d_util.g_label2color[current_label[start_idx+b, i]] + if FLAGS.visu: + fout.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color[0], color[1], color[2])) + fout_gt.write('v %f %f %f %d %d %d\n' % (pts[i,6], pts[i,7], pts[i,8], color_gt[0], color_gt[1], color_gt[2])) + fout_data_label.write('%f %f %f %d %d %d %f %d\n' % (pts[i,6], pts[i,7], pts[i,8], pts[i,3], pts[i,4], pts[i,5], pred_val[b,i,pred[i]], pred[i])) + fout_gt_label.write('%d\n' % (l[i])) + correct = np.sum(pred_label == current_label[start_idx:end_idx,:]) + total_correct += correct + total_seen += (cur_batch_size*NUM_POINT) + loss_sum += (loss_val*BATCH_SIZE) + for i in range(start_idx, end_idx): + for j in range(NUM_POINT): + l = current_label[i, j] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_label[i-start_idx, j] == l) + + log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT))) + log_string('eval accuracy: %f'% (total_correct / float(total_seen))) + fout_data_label.close() + fout_gt_label.close() + if FLAGS.visu: + fout.close() + fout_gt.close() + return total_correct, total_seen + + +if __name__=='__main__': + with tf.Graph().as_default(): + evaluate() + LOG_FOUT.close() diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py new file mode 100644 index 000000000..08133ce66 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/collect_indoor3d_data.py @@ -0,0 +1,52 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(BASE_DIR) +import indoor3d_util + +anno_paths = [line.rstrip() for line in open(os.path.join(BASE_DIR, 'meta/anno_paths.txt'))] +anno_paths = [os.path.join(indoor3d_util.DATA_PATH, p) for p in anno_paths] + +output_folder = os.path.join(ROOT_DIR, 'data/stanford_indoor3d') +if not os.path.exists(output_folder): + os.mkdir(output_folder) + +# Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually. +for anno_path in anno_paths: + print(anno_path) + try: + elements = anno_path.split('/') + out_filename = elements[-3]+'_'+elements[-2]+'.npy' # Area_1_hallway_1.npy + indoor3d_util.collect_point_label(anno_path, os.path.join(output_folder, out_filename), 'numpy') + except: + print(anno_path, 'ERROR!!') diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh new file mode 100644 index 000000000..baeb5cf15 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/download_data.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Download HDF5 for indoor 3d semantic segmentation (around 1.6GB) +wget https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip +unzip indoor3d_sem_seg_hdf5_data.zip +rm indoor3d_sem_seg_hdf5_data.zip + diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py new file mode 100644 index 000000000..03d17e0e4 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/eval_iou_accuracy.py @@ -0,0 +1,68 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+
+pred_data_label_filenames = [line.rstrip() for line in open('all_pred_data_label_filelist.txt')]
+# Strip the '_pred.txt' suffix exactly; str.rstrip('_pred.txt') would strip a
+# trailing *character set*, not the suffix, and could eat into the room name.
+gt_label_filenames = [f[:-len('_pred.txt')] + '_gt.txt' for f in pred_data_label_filenames]
+num_room = len(gt_label_filenames)
+
+
+gt_classes = [0 for _ in range(13)]
+positive_classes = [0 for _ in range(13)]
+true_positive_classes = [0 for _ in range(13)]
+for i in range(num_room):
+    print(i)
+    data_label = np.loadtxt(pred_data_label_filenames[i])
+    pred_label = data_label[:,-1]
+    gt_label = np.loadtxt(gt_label_filenames[i])
+    print(gt_label.shape)
+    for j in range(gt_label.shape[0]):
+        gt_l = int(gt_label[j])
+        pred_l = int(pred_label[j])
+        gt_classes[gt_l] += 1
+        positive_classes[pred_l] += 1
+        true_positive_classes[gt_l] += int(gt_l==pred_l)
+
+
+print(gt_classes)
+print(positive_classes)
+print(true_positive_classes)
+
+
+print('Overall accuracy: {0}'.format(sum(true_positive_classes)/float(sum(positive_classes))))
+
+print('IoU:')
+iou_list = []
+for i in range(13):
+    iou = true_positive_classes[i]/float(gt_classes[i]+positive_classes[i]-true_positive_classes[i])
+    print(iou)
+    iou_list.append(iou)
+
+print(sum(iou_list)/13.0)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py
new file mode 100644
index 000000000..03a0c5977
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/gen_indoor3d_h5.py
@@ -0,0 +1,115 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
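+#
+# Output size note (derived from the constants below): each
+# ply_data_all_<n>.h5 file holds up to H5_BATCH_SIZE = 1000 blocks of
+# NUM_POINT = 4096 points with 9 float32 features each, plus per-point
+# uint8 labels; the last file may be smaller.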
+# +import os +import numpy as np +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +import data_prep_util +import indoor3d_util + +# Constants +data_dir = os.path.join(ROOT_DIR, 'data') +indoor3d_data_dir = os.path.join(data_dir, 'stanford_indoor3d') +NUM_POINT = 4096 +H5_BATCH_SIZE = 1000 +data_dim = [NUM_POINT, 9] +label_dim = [NUM_POINT] +data_dtype = 'float32' +label_dtype = 'uint8' + +# Set paths +filelist = os.path.join(BASE_DIR, 'meta/all_data_label.txt') +data_label_files = [os.path.join(indoor3d_data_dir, line.rstrip()) for line in open(filelist)] +output_dir = os.path.join(data_dir, 'indoor3d_sem_seg_hdf5_data') +if not os.path.exists(output_dir): + os.mkdir(output_dir) +output_filename_prefix = os.path.join(output_dir, 'ply_data_all') +output_room_filelist = os.path.join(output_dir, 'room_filelist.txt') +fout_room = open(output_room_filelist, 'w') + +# -------------------------------------- +# ----- BATCH WRITE TO HDF5 ----- +# -------------------------------------- +batch_data_dim = [H5_BATCH_SIZE] + data_dim +batch_label_dim = [H5_BATCH_SIZE] + label_dim +h5_batch_data = np.zeros(batch_data_dim, dtype = np.float32) +h5_batch_label = np.zeros(batch_label_dim, dtype = np.uint8) +buffer_size = 0 # state: record how many samples are currently in buffer +h5_index = 0 # state: the next h5 file to save + +def insert_batch(data, label, last_batch=False): + global h5_batch_data, h5_batch_label + global buffer_size, h5_index + data_size = data.shape[0] + # If there is enough space, just insert + if buffer_size + data_size <= h5_batch_data.shape[0]: + h5_batch_data[buffer_size:buffer_size+data_size, ...] = data + h5_batch_label[buffer_size:buffer_size+data_size] = label + buffer_size += data_size + else: # not enough space + capacity = h5_batch_data.shape[0] - buffer_size + assert(capacity>=0) + if capacity > 0: + h5_batch_data[buffer_size:buffer_size+capacity, ...] = data[0:capacity, ...] + h5_batch_label[buffer_size:buffer_size+capacity, ...] = label[0:capacity, ...] 
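+        # (The buffer is full at this point: it is flushed to the next
+        # ply_data_all_<n>.h5 file below, and the rows of `data` that did not
+        # fit are re-inserted via the recursive call at the end.)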
+ # Save batch data and label to h5 file, reset buffer_size + h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5' + data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label, data_dtype, label_dtype) + print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0])) + h5_index += 1 + buffer_size = 0 + # recursive call + insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch) + if last_batch and buffer_size > 0: + h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5' + data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...], h5_batch_label[0:buffer_size, ...], data_dtype, label_dtype) + print('Stored {0} with size {1}'.format(h5_filename, buffer_size)) + h5_index += 1 + buffer_size = 0 + return + + +sample_cnt = 0 +for i, data_label_filename in enumerate(data_label_files): + print(data_label_filename) + data, label = indoor3d_util.room2blocks_wrapper_normalized(data_label_filename, NUM_POINT, block_size=1.0, stride=0.5, + random_sample=False, sample_num=None) + print('{0}, {1}'.format(data.shape, label.shape)) + for _ in range(data.shape[0]): + fout_room.write(os.path.basename(data_label_filename)[0:-4]+'\n') + + sample_cnt += data.shape[0] + insert_batch(data, label, i == len(data_label_files)-1) + +fout_room.close() +print("Total samples: {0}".format(sample_cnt)) diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py new file mode 100644 index 000000000..ea349d241 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/indoor3d_util.py @@ -0,0 +1,619 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
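+#
+# Data layout note: the room files produced by collect_point_label below store
+# one point per row as XYZRGBL, shifted so the most negative point is at the
+# origin; the 9-feature block format consumed by the network additionally
+# carries normalized room coordinates (read as columns 6-8 in
+# batch_inference.py).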
+# +import numpy as np +import glob +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(BASE_DIR) + +# ----------------------------------------------------------------------------- +# CONSTANTS +# ----------------------------------------------------------------------------- + +DATA_PATH = os.path.join(ROOT_DIR, 'data', 'Stanford3dDataset_v1.2_Aligned_Version') +g_classes = [x.rstrip() for x in open(os.path.join(BASE_DIR, 'meta/class_names.txt'))] +g_class2label = {cls: i for i,cls in enumerate(g_classes)} +g_class2color = {'ceiling': [0,255,0], + 'floor': [0,0,255], + 'wall': [0,255,255], + 'beam': [255,255,0], + 'column': [255,0,255], + 'window': [100,100,255], + 'door': [200,200,100], + 'table': [170,120,200], + 'chair': [255,0,0], + 'sofa': [200,100,100], + 'bookcase': [10,200,100], + 'board': [200,200,200], + 'clutter': [50,50,50]} +g_easy_view_labels = [7,8,9,10,11,1] +g_label2color = {g_classes.index(cls): g_class2color[cls] for cls in g_classes} + + +# ----------------------------------------------------------------------------- +# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES +# ----------------------------------------------------------------------------- + +def collect_point_label(anno_path, out_filename, file_format='txt'): + """ Convert original dataset files to data_label file (each line is XYZRGBL). + We aggregated all the points from each instance in the room. + + Args: + anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename: path to save collected points and labels (each line is XYZRGBL) + file_format: txt or numpy, determines what file format to save. + Returns: + None + Note: + the points are shifted before save, the most negative point is now at origin. + """ + points_list = [] + + for f in glob.glob(os.path.join(anno_path, '*.txt')): + cls = os.path.basename(f).split('_')[0] + if cls not in g_classes: # note: in some room there is 'staris' class.. + cls = 'clutter' + points = np.loadtxt(f) + labels = np.ones((points.shape[0],1)) * g_class2label[cls] + points_list.append(np.concatenate([points, labels], 1)) # Nx7 + + data_label = np.concatenate(points_list, 0) + xyz_min = np.amin(data_label, axis=0)[0:3] + data_label[:, 0:3] -= xyz_min + + if file_format=='txt': + fout = open(out_filename, 'w') + for i in range(data_label.shape[0]): + fout.write('%f %f %f %d %d %d %d\n' % \ + (data_label[i,0], data_label[i,1], data_label[i,2], + data_label[i,3], data_label[i,4], data_label[i,5], + data_label[i,6])) + fout.close() + elif file_format=='numpy': + np.save(out_filename, data_label) + else: + print('ERROR!! Unknown file format: %s, please use txt or numpy.' 
% \
+              (file_format))
+        exit()
+
+def point_label_to_obj(input_filename, out_filename, label_color=True, easy_view=False, no_wall=False):
+    """ Visualize a room from a data_label file by writing an OBJ file,
+        coloring each point with its label color.
+        input_filename: each line is X Y Z R G B L
+        out_filename: OBJ filename
+        easy_view: if True, only visualize furniture and floor
+        no_wall: if True, skip wall and ceiling points
+    """
+    data_label = np.loadtxt(input_filename)
+    data = data_label[:, 0:6]
+    label = data_label[:, -1].astype(int)
+    fout = open(out_filename, 'w')
+    for i in range(data.shape[0]):
+        color = g_label2color[label[i]]
+        if easy_view and (label[i] not in g_easy_view_labels):
+            continue
+        if no_wall and ((label[i] == 2) or (label[i]==0)):
+            continue
+        if label_color:
+            fout.write('v %f %f %f %d %d %d\n' % \
+                (data[i,0], data[i,1], data[i,2], color[0], color[1], color[2]))
+        else:
+            fout.write('v %f %f %f %d %d %d\n' % \
+                (data[i,0], data[i,1], data[i,2], data[i,3], data[i,4], data[i,5]))
+    fout.close()
+
+
+
+# -----------------------------------------------------------------------------
+# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING
+# -----------------------------------------------------------------------------
+
+def sample_data(data, num_sample):
+    """ data is N x ...
+        We want to keep num_sample x ... of them.
+        If N > num_sample, randomly keep num_sample of them.
+        If N < num_sample, randomly duplicate samples to make up the count.
+    """
+    N = data.shape[0]
+    if (N == num_sample):
+        return data, list(range(N))
+    elif (N > num_sample):
+        sample = np.random.choice(N, num_sample)
+        return data[sample, ...], sample
+    else:
+        sample = np.random.choice(N, num_sample-N)
+        dup_data = data[sample, ...]
+        # list() is required under Python 3: range objects cannot be
+        # concatenated with '+'
+        return np.concatenate([data, dup_data], 0), list(range(N))+list(sample)
+
+def sample_data_label(data, label, num_sample):
+    new_data, sample_indices = sample_data(data, num_sample)
+    new_label = label[sample_indices]
+    return new_data, new_label
+
+def room2blocks(data, label, num_point, block_size=1.0, stride=1.0,
+                random_sample=False, sample_num=None, sample_aug=1):
+    """ Prepare block training data.
+    Args:
+        data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
+            assumes the data is shifted (min point is origin) and aligned
+            (aligned with XYZ axis)
+        label: N size uint8 numpy array from 0-12
+        num_point: int, how many points to sample in each block
+        block_size: float, physical size of the block in meters
+        stride: float, stride for block sweeping
+        random_sample: bool, if True, randomly sample blocks in the room
+        sample_num: int, if random sample, how many blocks to sample
+            [default: room area]
+        sample_aug: if random sample, how much to augment
+    Returns:
+        block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1]
+        block_labels: K x num_point x 1 np array of uint8 labels
+
+    TODO: for this version, blocking is in fixed, non-overlapping pattern.
+ """ + assert(stride<=block_size) + + limit = np.amax(data, 0)[0:3] + + # Get the corner location for our sampling blocks + xbeg_list = [] + ybeg_list = [] + if not random_sample: + num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1 + num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1 + for i in range(num_block_x): + for j in range(num_block_y): + xbeg_list.append(i*stride) + ybeg_list.append(j*stride) + else: + num_block_x = int(np.ceil(limit[0] / block_size)) + num_block_y = int(np.ceil(limit[1] / block_size)) + if sample_num is None: + sample_num = num_block_x * num_block_y * sample_aug + for _ in range(sample_num): + xbeg = np.random.uniform(-block_size, limit[0]) + ybeg = np.random.uniform(-block_size, limit[1]) + xbeg_list.append(xbeg) + ybeg_list.append(ybeg) + + # Collect blocks + block_data_list = [] + block_label_list = [] + idx = 0 + for idx in range(len(xbeg_list)): + xbeg = xbeg_list[idx] + ybeg = ybeg_list[idx] + xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg) + ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg) + cond = xcond & ycond + if np.sum(cond) < 100: # discard block if there are less than 100 pts. + continue + + block_data = data[cond, :] + block_label = label[cond] + + # randomly subsample data + block_data_sampled, block_label_sampled = \ + sample_data_label(block_data, block_label, num_point) + block_data_list.append(np.expand_dims(block_data_sampled, 0)) + block_label_list.append(np.expand_dims(block_label_sampled, 0)) + + return np.concatenate(block_data_list, 0), \ + np.concatenate(block_label_list, 0) + + +def room2blocks_plus(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug): + """ room2block with input filename and RGB preprocessing. + """ + data = data_label[:,0:6] + data[:,3:6] /= 255.0 + label = data_label[:,-1].astype(np.uint8) + + return room2blocks(data, label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + +def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0, + random_sample=False, sample_num=None, sample_aug=1): + if data_label_filename[-3:] == 'txt': + data_label = np.loadtxt(data_label_filename) + elif data_label_filename[-3:] == 'npy': + data_label = np.load(data_label_filename) + else: + print('Unknown file type! exiting.') + exit() + return room2blocks_plus(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + +def room2blocks_plus_normalized(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug): + """ room2block, with input filename and RGB preprocessing. 
+ for each block centralize XYZ, add normalized XYZ as 678 channels + """ + data = data_label[:,0:6] + data[:,3:6] /= 255.0 + label = data_label[:,-1].astype(np.uint8) + max_room_x = max(data[:,0]) + max_room_y = max(data[:,1]) + max_room_z = max(data[:,2]) + + data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + new_data_batch = np.zeros((data_batch.shape[0], num_point, 9)) + for b in range(data_batch.shape[0]): + new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x + new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y + new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z + minx = min(data_batch[b, :, 0]) + miny = min(data_batch[b, :, 1]) + data_batch[b, :, 0] -= (minx+block_size/2) + data_batch[b, :, 1] -= (miny+block_size/2) + new_data_batch[:, :, 0:6] = data_batch + return new_data_batch, label_batch + + +def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0, + random_sample=False, sample_num=None, sample_aug=1): + if data_label_filename[-3:] == 'txt': + data_label = np.loadtxt(data_label_filename) + elif data_label_filename[-3:] == 'npy': + data_label = np.load(data_label_filename) + else: + print('Unknown file type! exiting.') + exit() + return room2blocks_plus_normalized(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + +def room2samples(data, label, sample_num_point): + """ Prepare whole room samples. + + Args: + data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1] + assumes the data is shifted (min point is origin) and + aligned (aligned with XYZ axis) + label: N size uint8 numpy array from 0-12 + sample_num_point: int, how many points to sample in each sample + Returns: + sample_datas: K x sample_num_point x 9 + numpy array of XYZRGBX'Y'Z', RGB is in [0,1] + sample_labels: K x sample_num_point x 1 np array of uint8 labels + """ + N = data.shape[0] + order = np.arange(N) + np.random.shuffle(order) + data = data[order, :] + label = label[order] + + batch_num = int(np.ceil(N / float(sample_num_point))) + sample_datas = np.zeros((batch_num, sample_num_point, 6)) + sample_labels = np.zeros((batch_num, sample_num_point, 1)) + + for i in range(batch_num): + beg_idx = i*sample_num_point + end_idx = min((i+1)*sample_num_point, N) + num = end_idx - beg_idx + sample_datas[i,0:num,:] = data[beg_idx:end_idx, :] + sample_labels[i,0:num,0] = label[beg_idx:end_idx] + if num < sample_num_point: + makeup_indices = np.random.choice(N, sample_num_point - num) + sample_datas[i,num:,:] = data[makeup_indices, :] + sample_labels[i,num:,0] = label[makeup_indices] + return sample_datas, sample_labels + +def room2samples_plus_normalized(data_label, num_point): + """ room2sample, with input filename and RGB preprocessing. 
+ for each block centralize XYZ, add normalized XYZ as 678 channels + """ + data = data_label[:,0:6] + data[:,3:6] /= 255.0 + label = data_label[:,-1].astype(np.uint8) + max_room_x = max(data[:,0]) + max_room_y = max(data[:,1]) + max_room_z = max(data[:,2]) + #print(max_room_x, max_room_y, max_room_z) + + data_batch, label_batch = room2samples(data, label, num_point) + new_data_batch = np.zeros((data_batch.shape[0], num_point, 9)) + for b in range(data_batch.shape[0]): + new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x + new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y + new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z + #minx = min(data_batch[b, :, 0]) + #miny = min(data_batch[b, :, 1]) + #data_batch[b, :, 0] -= (minx+block_size/2) + #data_batch[b, :, 1] -= (miny+block_size/2) + new_data_batch[:, :, 0:6] = data_batch + return new_data_batch, label_batch + + +def room2samples_wrapper_normalized(data_label_filename, num_point): + if data_label_filename[-3:] == 'txt': + data_label = np.loadtxt(data_label_filename) + elif data_label_filename[-3:] == 'npy': + data_label = np.load(data_label_filename) + else: + print('Unknown file type! exiting.') + exit() + return room2samples_plus_normalized(data_label, num_point) + + +# ----------------------------------------------------------------------------- +# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation) +# ----------------------------------------------------------------------------- + +def collect_bounding_box(anno_path, out_filename): + """ Compute bounding boxes from each instance in original dataset files on + one room. **We assume the bbox is aligned with XYZ coordinate.** + + Args: + anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename: path to save instance bounding boxes for that room. + each line is x1 y1 z1 x2 y2 z2 label, + where (x1,y1,z1) is the point on the diagonal closer to origin + Returns: + None + Note: + room points are shifted, the most negative point is now at origin. + """ + bbox_label_list = [] + + for f in glob.glob(os.path.join(anno_path, '*.txt')): + cls = os.path.basename(f).split('_')[0] + if cls not in g_classes: # note: in some room there is 'staris' class.. + cls = 'clutter' + points = np.loadtxt(f) + label = g_class2label[cls] + # Compute tightest axis aligned bounding box + xyz_min = np.amin(points[:, 0:3], axis=0) + xyz_max = np.amax(points[:, 0:3], axis=0) + ins_bbox_label = np.expand_dims( + np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0) + bbox_label_list.append(ins_bbox_label) + + bbox_label = np.concatenate(bbox_label_list, 0) + room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0) + bbox_label[:, 0:3] -= room_xyz_min + bbox_label[:, 3:6] -= room_xyz_min + + fout = open(out_filename, 'w') + for i in range(bbox_label.shape[0]): + fout.write('%f %f %f %f %f %f %d\n' % \ + (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2], + bbox_label[i,3], bbox_label[i,4], bbox_label[i,5], + bbox_label[i,6])) + fout.close() + +def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=False): + """ Visualization of bounding boxes. 
+ + Args: + input_filename: each line is x1 y1 z1 x2 y2 z2 label + out_filename_prefix: OBJ filename prefix, + visualize object by g_label2color + easy_view: if True, only visualize furniture and floor + Returns: + output a list of OBJ file and MTL files with the same prefix + """ + bbox_label = np.loadtxt(input_filename) + bbox = bbox_label[:, 0:6] + label = bbox_label[:, -1].astype(int) + v_cnt = 0 # count vertex + ins_cnt = 0 # count instance + for i in range(bbox.shape[0]): + if easy_view and (label[i] not in g_easy_view_labels): + continue + obj_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.obj' + mtl_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.mtl' + fout_obj = open(obj_filename, 'w') + fout_mtl = open(mtl_filename, 'w') + fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename))) + + length = bbox[i, 3:6] - bbox[i, 0:3] + a = length[0] + b = length[1] + c = length[2] + x = bbox[i, 0] + y = bbox[i, 1] + z = bbox[i, 2] + color = np.array(g_label2color[label[i]], dtype=float) / 255.0 + + material = 'material%d' % (ins_cnt) + fout_obj.write('usemtl %s\n' % (material)) + fout_obj.write('v %f %f %f\n' % (x,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y,z)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z)) + fout_obj.write('g default\n') + v_cnt = 0 # for individual box + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt)) + fout_obj.write('\n') + + fout_mtl.write('newmtl %s\n' % (material)) + fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2])) + fout_mtl.write('\n') + fout_obj.close() + fout_mtl.close() + + v_cnt += 8 + ins_cnt += 1 + +def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_view=False, permute=None, center=False, exclude_table=False): + """ Visualization of bounding boxes. + + Args: + input_filename: each line is x1 y1 z1 x2 y2 z2 label + out_filename_prefix: OBJ filename prefix, + visualize object by g_label2color + easy_view: if True, only visualize furniture and floor + permute: if not None, permute XYZ for rendering, e.g. 
[0 2 1] + center: if True, move obj to have zero origin + Returns: + output a list of OBJ file and MTL files with the same prefix + """ + bbox_label = np.loadtxt(input_filename) + bbox = bbox_label[:, 0:6] + if permute is not None: + assert(len(permute)==3) + permute = np.array(permute) + bbox[:,0:3] = bbox[:,permute] + bbox[:,3:6] = bbox[:,permute+3] + if center: + xyz_max = np.amax(bbox[:,3:6], 0) + bbox[:,0:3] -= (xyz_max/2.0) + bbox[:,3:6] -= (xyz_max/2.0) + bbox /= np.max(xyz_max/2.0) + label = bbox_label[:, -1].astype(int) + obj_filename = out_filename_prefix+'.obj' + mtl_filename = out_filename_prefix+'.mtl' + + fout_obj = open(obj_filename, 'w') + fout_mtl = open(mtl_filename, 'w') + fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename))) + v_cnt = 0 # count vertex + ins_cnt = 0 # count instance + for i in range(bbox.shape[0]): + if easy_view and (label[i] not in g_easy_view_labels): + continue + if exclude_table and label[i] == g_classes.index('table'): + continue + + length = bbox[i, 3:6] - bbox[i, 0:3] + a = length[0] + b = length[1] + c = length[2] + x = bbox[i, 0] + y = bbox[i, 1] + z = bbox[i, 2] + color = np.array(g_label2color[label[i]], dtype=float) / 255.0 + + material = 'material%d' % (ins_cnt) + fout_obj.write('usemtl %s\n' % (material)) + fout_obj.write('v %f %f %f\n' % (x,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y,z)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z)) + fout_obj.write('g default\n') + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt)) + fout_obj.write('\n') + + fout_mtl.write('newmtl %s\n' % (material)) + fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2])) + fout_mtl.write('\n') + + v_cnt += 8 + ins_cnt += 1 + + fout_obj.close() + fout_mtl.close() + + +def collect_point_bounding_box(anno_path, out_filename, file_format): + """ Compute bounding boxes from each instance in original dataset files on + one room. **We assume the bbox is aligned with XYZ coordinate.** + Save both the point XYZRGB and the bounding box for the point's + parent element. + + Args: + anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename: path to save instance bounding boxes for each point, + plus the point's XYZRGBL + each line is XYZRGBL offsetX offsetY offsetZ a b c, + where cx = X+offsetX, cy=X+offsetY, cz=Z+offsetZ + where (cx,cy,cz) is center of the box, a,b,c are distances from center + to the surfaces of the box, i.e. x1 = cx-a, x2 = cx+a, y1=cy-b etc. + file_format: output file format, txt or numpy + Returns: + None + + Note: + room points are shifted, the most negative point is now at origin. + """ + point_bbox_list = [] + + for f in glob.glob(os.path.join(anno_path, '*.txt')): + cls = os.path.basename(f).split('_')[0] + if cls not in g_classes: # note: in some room there is 'staris' class.. 
+ cls = 'clutter' + points = np.loadtxt(f) # Nx6 + label = g_class2label[cls] # N, + # Compute tightest axis aligned bounding box + xyz_min = np.amin(points[:, 0:3], axis=0) # 3, + xyz_max = np.amax(points[:, 0:3], axis=0) # 3, + xyz_center = (xyz_min + xyz_max) / 2 + dimension = (xyz_max - xyz_min) / 2 + + xyz_offsets = xyz_center - points[:,0:3] # Nx3 + dimensions = np.ones((points.shape[0],3)) * dimension # Nx3 + labels = np.ones((points.shape[0],1)) * label # N + point_bbox_list.append(np.concatenate([points, labels, + xyz_offsets, dimensions], 1)) # Nx13 + + point_bbox = np.concatenate(point_bbox_list, 0) # KxNx13 + room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0) + point_bbox[:, 0:3] -= room_xyz_min + + if file_format == 'txt': + fout = open(out_filename, 'w') + for i in range(point_bbox.shape[0]): + fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\n' % \ + (point_bbox[i,0], point_bbox[i,1], point_bbox[i,2], + point_bbox[i,3], point_bbox[i,4], point_bbox[i,5], + point_bbox[i,6], + point_bbox[i,7], point_bbox[i,8], point_bbox[i,9], + point_bbox[i,10], point_bbox[i,11], point_bbox[i,12])) + + fout.close() + elif file_format == 'numpy': + np.save(out_filename, point_bbox) + else: + print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \ + (file_format)) + exit() + + diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt new file mode 100644 index 000000000..636e686a5 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/all_data_label.txt @@ -0,0 +1,272 @@ +Area_1_conferenceRoom_1.npy +Area_1_conferenceRoom_2.npy +Area_1_copyRoom_1.npy +Area_1_hallway_1.npy +Area_1_hallway_2.npy +Area_1_hallway_3.npy +Area_1_hallway_4.npy +Area_1_hallway_5.npy +Area_1_hallway_6.npy +Area_1_hallway_7.npy +Area_1_hallway_8.npy +Area_1_office_10.npy +Area_1_office_11.npy +Area_1_office_12.npy +Area_1_office_13.npy +Area_1_office_14.npy +Area_1_office_15.npy +Area_1_office_16.npy +Area_1_office_17.npy +Area_1_office_18.npy +Area_1_office_19.npy +Area_1_office_1.npy +Area_1_office_20.npy +Area_1_office_21.npy +Area_1_office_22.npy +Area_1_office_23.npy +Area_1_office_24.npy +Area_1_office_25.npy +Area_1_office_26.npy +Area_1_office_27.npy +Area_1_office_28.npy +Area_1_office_29.npy +Area_1_office_2.npy +Area_1_office_30.npy +Area_1_office_31.npy +Area_1_office_3.npy +Area_1_office_4.npy +Area_1_office_5.npy +Area_1_office_6.npy +Area_1_office_7.npy +Area_1_office_8.npy +Area_1_office_9.npy +Area_1_pantry_1.npy +Area_1_WC_1.npy +Area_2_auditorium_1.npy +Area_2_auditorium_2.npy +Area_2_conferenceRoom_1.npy +Area_2_hallway_10.npy +Area_2_hallway_11.npy +Area_2_hallway_12.npy +Area_2_hallway_1.npy +Area_2_hallway_2.npy +Area_2_hallway_3.npy +Area_2_hallway_4.npy +Area_2_hallway_5.npy +Area_2_hallway_6.npy +Area_2_hallway_7.npy +Area_2_hallway_8.npy +Area_2_hallway_9.npy +Area_2_office_10.npy +Area_2_office_11.npy +Area_2_office_12.npy +Area_2_office_13.npy +Area_2_office_14.npy +Area_2_office_1.npy +Area_2_office_2.npy +Area_2_office_3.npy +Area_2_office_4.npy +Area_2_office_5.npy +Area_2_office_6.npy +Area_2_office_7.npy +Area_2_office_8.npy +Area_2_office_9.npy +Area_2_storage_1.npy +Area_2_storage_2.npy +Area_2_storage_3.npy +Area_2_storage_4.npy +Area_2_storage_5.npy +Area_2_storage_6.npy +Area_2_storage_7.npy +Area_2_storage_8.npy +Area_2_storage_9.npy +Area_2_WC_1.npy +Area_2_WC_2.npy 
+Area_3_conferenceRoom_1.npy +Area_3_hallway_1.npy +Area_3_hallway_2.npy +Area_3_hallway_3.npy +Area_3_hallway_4.npy +Area_3_hallway_5.npy +Area_3_hallway_6.npy +Area_3_lounge_1.npy +Area_3_lounge_2.npy +Area_3_office_10.npy +Area_3_office_1.npy +Area_3_office_2.npy +Area_3_office_3.npy +Area_3_office_4.npy +Area_3_office_5.npy +Area_3_office_6.npy +Area_3_office_7.npy +Area_3_office_8.npy +Area_3_office_9.npy +Area_3_storage_1.npy +Area_3_storage_2.npy +Area_3_WC_1.npy +Area_3_WC_2.npy +Area_4_conferenceRoom_1.npy +Area_4_conferenceRoom_2.npy +Area_4_conferenceRoom_3.npy +Area_4_hallway_10.npy +Area_4_hallway_11.npy +Area_4_hallway_12.npy +Area_4_hallway_13.npy +Area_4_hallway_14.npy +Area_4_hallway_1.npy +Area_4_hallway_2.npy +Area_4_hallway_3.npy +Area_4_hallway_4.npy +Area_4_hallway_5.npy +Area_4_hallway_6.npy +Area_4_hallway_7.npy +Area_4_hallway_8.npy +Area_4_hallway_9.npy +Area_4_lobby_1.npy +Area_4_lobby_2.npy +Area_4_office_10.npy +Area_4_office_11.npy +Area_4_office_12.npy +Area_4_office_13.npy +Area_4_office_14.npy +Area_4_office_15.npy +Area_4_office_16.npy +Area_4_office_17.npy +Area_4_office_18.npy +Area_4_office_19.npy +Area_4_office_1.npy +Area_4_office_20.npy +Area_4_office_21.npy +Area_4_office_22.npy +Area_4_office_2.npy +Area_4_office_3.npy +Area_4_office_4.npy +Area_4_office_5.npy +Area_4_office_6.npy +Area_4_office_7.npy +Area_4_office_8.npy +Area_4_office_9.npy +Area_4_storage_1.npy +Area_4_storage_2.npy +Area_4_storage_3.npy +Area_4_storage_4.npy +Area_4_WC_1.npy +Area_4_WC_2.npy +Area_4_WC_3.npy +Area_4_WC_4.npy +Area_5_conferenceRoom_1.npy +Area_5_conferenceRoom_2.npy +Area_5_conferenceRoom_3.npy +Area_5_hallway_10.npy +Area_5_hallway_11.npy +Area_5_hallway_12.npy +Area_5_hallway_13.npy +Area_5_hallway_14.npy +Area_5_hallway_15.npy +Area_5_hallway_1.npy +Area_5_hallway_2.npy +Area_5_hallway_3.npy +Area_5_hallway_4.npy +Area_5_hallway_5.npy +Area_5_hallway_6.npy +Area_5_hallway_7.npy +Area_5_hallway_8.npy +Area_5_hallway_9.npy +Area_5_lobby_1.npy +Area_5_office_10.npy +Area_5_office_11.npy +Area_5_office_12.npy +Area_5_office_13.npy +Area_5_office_14.npy +Area_5_office_15.npy +Area_5_office_16.npy +Area_5_office_17.npy +Area_5_office_18.npy +Area_5_office_19.npy +Area_5_office_1.npy +Area_5_office_20.npy +Area_5_office_21.npy +Area_5_office_22.npy +Area_5_office_23.npy +Area_5_office_24.npy +Area_5_office_25.npy +Area_5_office_26.npy +Area_5_office_27.npy +Area_5_office_28.npy +Area_5_office_29.npy +Area_5_office_2.npy +Area_5_office_30.npy +Area_5_office_31.npy +Area_5_office_32.npy +Area_5_office_33.npy +Area_5_office_34.npy +Area_5_office_35.npy +Area_5_office_36.npy +Area_5_office_37.npy +Area_5_office_38.npy +Area_5_office_39.npy +Area_5_office_3.npy +Area_5_office_40.npy +Area_5_office_41.npy +Area_5_office_42.npy +Area_5_office_4.npy +Area_5_office_5.npy +Area_5_office_6.npy +Area_5_office_7.npy +Area_5_office_8.npy +Area_5_office_9.npy +Area_5_pantry_1.npy +Area_5_storage_1.npy +Area_5_storage_2.npy +Area_5_storage_3.npy +Area_5_storage_4.npy +Area_5_WC_1.npy +Area_5_WC_2.npy +Area_6_conferenceRoom_1.npy +Area_6_copyRoom_1.npy +Area_6_hallway_1.npy +Area_6_hallway_2.npy +Area_6_hallway_3.npy +Area_6_hallway_4.npy +Area_6_hallway_5.npy +Area_6_hallway_6.npy +Area_6_lounge_1.npy +Area_6_office_10.npy +Area_6_office_11.npy +Area_6_office_12.npy +Area_6_office_13.npy +Area_6_office_14.npy +Area_6_office_15.npy +Area_6_office_16.npy +Area_6_office_17.npy +Area_6_office_18.npy +Area_6_office_19.npy +Area_6_office_1.npy +Area_6_office_20.npy +Area_6_office_21.npy 
+Area_6_office_22.npy +Area_6_office_23.npy +Area_6_office_24.npy +Area_6_office_25.npy +Area_6_office_26.npy +Area_6_office_27.npy +Area_6_office_28.npy +Area_6_office_29.npy +Area_6_office_2.npy +Area_6_office_30.npy +Area_6_office_31.npy +Area_6_office_32.npy +Area_6_office_33.npy +Area_6_office_34.npy +Area_6_office_35.npy +Area_6_office_36.npy +Area_6_office_37.npy +Area_6_office_3.npy +Area_6_office_4.npy +Area_6_office_5.npy +Area_6_office_6.npy +Area_6_office_7.npy +Area_6_office_8.npy +Area_6_office_9.npy +Area_6_openspace_1.npy +Area_6_pantry_1.npy diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt new file mode 100644 index 000000000..0ad2f2599 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/anno_paths.txt @@ -0,0 +1,272 @@ +Area_1/conferenceRoom_1/Annotations +Area_1/conferenceRoom_2/Annotations +Area_1/copyRoom_1/Annotations +Area_1/hallway_1/Annotations +Area_1/hallway_2/Annotations +Area_1/hallway_3/Annotations +Area_1/hallway_4/Annotations +Area_1/hallway_5/Annotations +Area_1/hallway_6/Annotations +Area_1/hallway_7/Annotations +Area_1/hallway_8/Annotations +Area_1/office_10/Annotations +Area_1/office_11/Annotations +Area_1/office_12/Annotations +Area_1/office_13/Annotations +Area_1/office_14/Annotations +Area_1/office_15/Annotations +Area_1/office_16/Annotations +Area_1/office_17/Annotations +Area_1/office_18/Annotations +Area_1/office_19/Annotations +Area_1/office_1/Annotations +Area_1/office_20/Annotations +Area_1/office_21/Annotations +Area_1/office_22/Annotations +Area_1/office_23/Annotations +Area_1/office_24/Annotations +Area_1/office_25/Annotations +Area_1/office_26/Annotations +Area_1/office_27/Annotations +Area_1/office_28/Annotations +Area_1/office_29/Annotations +Area_1/office_2/Annotations +Area_1/office_30/Annotations +Area_1/office_31/Annotations +Area_1/office_3/Annotations +Area_1/office_4/Annotations +Area_1/office_5/Annotations +Area_1/office_6/Annotations +Area_1/office_7/Annotations +Area_1/office_8/Annotations +Area_1/office_9/Annotations +Area_1/pantry_1/Annotations +Area_1/WC_1/Annotations +Area_2/auditorium_1/Annotations +Area_2/auditorium_2/Annotations +Area_2/conferenceRoom_1/Annotations +Area_2/hallway_10/Annotations +Area_2/hallway_11/Annotations +Area_2/hallway_12/Annotations +Area_2/hallway_1/Annotations +Area_2/hallway_2/Annotations +Area_2/hallway_3/Annotations +Area_2/hallway_4/Annotations +Area_2/hallway_5/Annotations +Area_2/hallway_6/Annotations +Area_2/hallway_7/Annotations +Area_2/hallway_8/Annotations +Area_2/hallway_9/Annotations +Area_2/office_10/Annotations +Area_2/office_11/Annotations +Area_2/office_12/Annotations +Area_2/office_13/Annotations +Area_2/office_14/Annotations +Area_2/office_1/Annotations +Area_2/office_2/Annotations +Area_2/office_3/Annotations +Area_2/office_4/Annotations +Area_2/office_5/Annotations +Area_2/office_6/Annotations +Area_2/office_7/Annotations +Area_2/office_8/Annotations +Area_2/office_9/Annotations +Area_2/storage_1/Annotations +Area_2/storage_2/Annotations +Area_2/storage_3/Annotations +Area_2/storage_4/Annotations +Area_2/storage_5/Annotations +Area_2/storage_6/Annotations +Area_2/storage_7/Annotations +Area_2/storage_8/Annotations +Area_2/storage_9/Annotations +Area_2/WC_1/Annotations +Area_2/WC_2/Annotations +Area_3/conferenceRoom_1/Annotations +Area_3/hallway_1/Annotations 
+Area_3/hallway_2/Annotations +Area_3/hallway_3/Annotations +Area_3/hallway_4/Annotations +Area_3/hallway_5/Annotations +Area_3/hallway_6/Annotations +Area_3/lounge_1/Annotations +Area_3/lounge_2/Annotations +Area_3/office_10/Annotations +Area_3/office_1/Annotations +Area_3/office_2/Annotations +Area_3/office_3/Annotations +Area_3/office_4/Annotations +Area_3/office_5/Annotations +Area_3/office_6/Annotations +Area_3/office_7/Annotations +Area_3/office_8/Annotations +Area_3/office_9/Annotations +Area_3/storage_1/Annotations +Area_3/storage_2/Annotations +Area_3/WC_1/Annotations +Area_3/WC_2/Annotations +Area_4/conferenceRoom_1/Annotations +Area_4/conferenceRoom_2/Annotations +Area_4/conferenceRoom_3/Annotations +Area_4/hallway_10/Annotations +Area_4/hallway_11/Annotations +Area_4/hallway_12/Annotations +Area_4/hallway_13/Annotations +Area_4/hallway_14/Annotations +Area_4/hallway_1/Annotations +Area_4/hallway_2/Annotations +Area_4/hallway_3/Annotations +Area_4/hallway_4/Annotations +Area_4/hallway_5/Annotations +Area_4/hallway_6/Annotations +Area_4/hallway_7/Annotations +Area_4/hallway_8/Annotations +Area_4/hallway_9/Annotations +Area_4/lobby_1/Annotations +Area_4/lobby_2/Annotations +Area_4/office_10/Annotations +Area_4/office_11/Annotations +Area_4/office_12/Annotations +Area_4/office_13/Annotations +Area_4/office_14/Annotations +Area_4/office_15/Annotations +Area_4/office_16/Annotations +Area_4/office_17/Annotations +Area_4/office_18/Annotations +Area_4/office_19/Annotations +Area_4/office_1/Annotations +Area_4/office_20/Annotations +Area_4/office_21/Annotations +Area_4/office_22/Annotations +Area_4/office_2/Annotations +Area_4/office_3/Annotations +Area_4/office_4/Annotations +Area_4/office_5/Annotations +Area_4/office_6/Annotations +Area_4/office_7/Annotations +Area_4/office_8/Annotations +Area_4/office_9/Annotations +Area_4/storage_1/Annotations +Area_4/storage_2/Annotations +Area_4/storage_3/Annotations +Area_4/storage_4/Annotations +Area_4/WC_1/Annotations +Area_4/WC_2/Annotations +Area_4/WC_3/Annotations +Area_4/WC_4/Annotations +Area_5/conferenceRoom_1/Annotations +Area_5/conferenceRoom_2/Annotations +Area_5/conferenceRoom_3/Annotations +Area_5/hallway_10/Annotations +Area_5/hallway_11/Annotations +Area_5/hallway_12/Annotations +Area_5/hallway_13/Annotations +Area_5/hallway_14/Annotations +Area_5/hallway_15/Annotations +Area_5/hallway_1/Annotations +Area_5/hallway_2/Annotations +Area_5/hallway_3/Annotations +Area_5/hallway_4/Annotations +Area_5/hallway_5/Annotations +Area_5/hallway_6/Annotations +Area_5/hallway_7/Annotations +Area_5/hallway_8/Annotations +Area_5/hallway_9/Annotations +Area_5/lobby_1/Annotations +Area_5/office_10/Annotations +Area_5/office_11/Annotations +Area_5/office_12/Annotations +Area_5/office_13/Annotations +Area_5/office_14/Annotations +Area_5/office_15/Annotations +Area_5/office_16/Annotations +Area_5/office_17/Annotations +Area_5/office_18/Annotations +Area_5/office_19/Annotations +Area_5/office_1/Annotations +Area_5/office_20/Annotations +Area_5/office_21/Annotations +Area_5/office_22/Annotations +Area_5/office_23/Annotations +Area_5/office_24/Annotations +Area_5/office_25/Annotations +Area_5/office_26/Annotations +Area_5/office_27/Annotations +Area_5/office_28/Annotations +Area_5/office_29/Annotations +Area_5/office_2/Annotations +Area_5/office_30/Annotations +Area_5/office_31/Annotations +Area_5/office_32/Annotations +Area_5/office_33/Annotations +Area_5/office_34/Annotations +Area_5/office_35/Annotations +Area_5/office_36/Annotations 
+Area_5/office_37/Annotations +Area_5/office_38/Annotations +Area_5/office_39/Annotations +Area_5/office_3/Annotations +Area_5/office_40/Annotations +Area_5/office_41/Annotations +Area_5/office_42/Annotations +Area_5/office_4/Annotations +Area_5/office_5/Annotations +Area_5/office_6/Annotations +Area_5/office_7/Annotations +Area_5/office_8/Annotations +Area_5/office_9/Annotations +Area_5/pantry_1/Annotations +Area_5/storage_1/Annotations +Area_5/storage_2/Annotations +Area_5/storage_3/Annotations +Area_5/storage_4/Annotations +Area_5/WC_1/Annotations +Area_5/WC_2/Annotations +Area_6/conferenceRoom_1/Annotations +Area_6/copyRoom_1/Annotations +Area_6/hallway_1/Annotations +Area_6/hallway_2/Annotations +Area_6/hallway_3/Annotations +Area_6/hallway_4/Annotations +Area_6/hallway_5/Annotations +Area_6/hallway_6/Annotations +Area_6/lounge_1/Annotations +Area_6/office_10/Annotations +Area_6/office_11/Annotations +Area_6/office_12/Annotations +Area_6/office_13/Annotations +Area_6/office_14/Annotations +Area_6/office_15/Annotations +Area_6/office_16/Annotations +Area_6/office_17/Annotations +Area_6/office_18/Annotations +Area_6/office_19/Annotations +Area_6/office_1/Annotations +Area_6/office_20/Annotations +Area_6/office_21/Annotations +Area_6/office_22/Annotations +Area_6/office_23/Annotations +Area_6/office_24/Annotations +Area_6/office_25/Annotations +Area_6/office_26/Annotations +Area_6/office_27/Annotations +Area_6/office_28/Annotations +Area_6/office_29/Annotations +Area_6/office_2/Annotations +Area_6/office_30/Annotations +Area_6/office_31/Annotations +Area_6/office_32/Annotations +Area_6/office_33/Annotations +Area_6/office_34/Annotations +Area_6/office_35/Annotations +Area_6/office_36/Annotations +Area_6/office_37/Annotations +Area_6/office_3/Annotations +Area_6/office_4/Annotations +Area_6/office_5/Annotations +Area_6/office_6/Annotations +Area_6/office_7/Annotations +Area_6/office_8/Annotations +Area_6/office_9/Annotations +Area_6/openspace_1/Annotations +Area_6/pantry_1/Annotations diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt new file mode 100644 index 000000000..d067baa09 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/area6_data_label.txt @@ -0,0 +1,48 @@ +data/stanford_indoor3d/Area_6_conferenceRoom_1.npy +data/stanford_indoor3d/Area_6_copyRoom_1.npy +data/stanford_indoor3d/Area_6_hallway_1.npy +data/stanford_indoor3d/Area_6_hallway_2.npy +data/stanford_indoor3d/Area_6_hallway_3.npy +data/stanford_indoor3d/Area_6_hallway_4.npy +data/stanford_indoor3d/Area_6_hallway_5.npy +data/stanford_indoor3d/Area_6_hallway_6.npy +data/stanford_indoor3d/Area_6_lounge_1.npy +data/stanford_indoor3d/Area_6_office_10.npy +data/stanford_indoor3d/Area_6_office_11.npy +data/stanford_indoor3d/Area_6_office_12.npy +data/stanford_indoor3d/Area_6_office_13.npy +data/stanford_indoor3d/Area_6_office_14.npy +data/stanford_indoor3d/Area_6_office_15.npy +data/stanford_indoor3d/Area_6_office_16.npy +data/stanford_indoor3d/Area_6_office_17.npy +data/stanford_indoor3d/Area_6_office_18.npy +data/stanford_indoor3d/Area_6_office_19.npy +data/stanford_indoor3d/Area_6_office_1.npy +data/stanford_indoor3d/Area_6_office_20.npy +data/stanford_indoor3d/Area_6_office_21.npy +data/stanford_indoor3d/Area_6_office_22.npy +data/stanford_indoor3d/Area_6_office_23.npy 
+data/stanford_indoor3d/Area_6_office_24.npy +data/stanford_indoor3d/Area_6_office_25.npy +data/stanford_indoor3d/Area_6_office_26.npy +data/stanford_indoor3d/Area_6_office_27.npy +data/stanford_indoor3d/Area_6_office_28.npy +data/stanford_indoor3d/Area_6_office_29.npy +data/stanford_indoor3d/Area_6_office_2.npy +data/stanford_indoor3d/Area_6_office_30.npy +data/stanford_indoor3d/Area_6_office_31.npy +data/stanford_indoor3d/Area_6_office_32.npy +data/stanford_indoor3d/Area_6_office_33.npy +data/stanford_indoor3d/Area_6_office_34.npy +data/stanford_indoor3d/Area_6_office_35.npy +data/stanford_indoor3d/Area_6_office_36.npy +data/stanford_indoor3d/Area_6_office_37.npy +data/stanford_indoor3d/Area_6_office_3.npy +data/stanford_indoor3d/Area_6_office_4.npy +data/stanford_indoor3d/Area_6_office_5.npy +data/stanford_indoor3d/Area_6_office_6.npy +data/stanford_indoor3d/Area_6_office_7.npy +data/stanford_indoor3d/Area_6_office_8.npy +data/stanford_indoor3d/Area_6_office_9.npy +data/stanford_indoor3d/Area_6_openspace_1.npy +data/stanford_indoor3d/Area_6_pantry_1.npy diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt new file mode 100644 index 000000000..ca1d17882 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/meta/class_names.txt @@ -0,0 +1,13 @@ +ceiling +floor +wall +beam +column +window +door +table +chair +sofa +bookcase +board +clutter diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py new file mode 100644 index 000000000..9efdce3c0 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/model.py @@ -0,0 +1,106 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
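+#
+# Shape walk-through (a sketch of the graph get_model builds below, using the
+# train.py defaults B=24, N=4096):
+#
+#     input     B x N x 9          XYZ, RGB and room-normalized XYZ per point
+#     conv1-5   B x N x 1 x 1024   shared per-point MLP (1x9 kernel, then 1x1)
+#     maxpool   B x 1 x 1 x 1024   symmetric global feature over all N points
+#     fc1, fc2  B x 256 -> B x 128 global descriptor, tiled back to each point
+#     concat    B x N x 1 x 1152   per-point feature + global context
+#     conv6-8   B x N x 13         per-point logits for the 13 S3DIS classes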
+#
+import tensorflow as tf
+import math
+import time
+import numpy as np
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(os.path.join(ROOT_DIR, 'utils'))
+import tf_util
+
+def placeholder_inputs(batch_size, num_point):
+    pointclouds_pl = tf.compat.v1.placeholder(tf.float32,
+                                              shape=(batch_size, num_point, 9))
+    labels_pl = tf.compat.v1.placeholder(tf.int32,
+                                         shape=(batch_size, num_point))
+    return pointclouds_pl, labels_pl
+
+def get_model(point_cloud, is_training, bn_decay=None):
+    """ ConvNet baseline, input is BxNx9 (XYZ, RGB, room-normalized XYZ) """
+    # Under TF2, TensorShape dimensions index directly to ints; the TF1-style
+    # .value attribute no longer exists.
+    batch_size = point_cloud.get_shape()[0]
+    num_point = point_cloud.get_shape()[1]
+
+    input_image = tf.expand_dims(point_cloud, -1)
+    # CONV
+    net = tf_util.conv2d(input_image, 64, [1,9], padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
+    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
+    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
+    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
+    points_feat1 = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
+                                  bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
+    # MAX
+    pc_feat1 = tf_util.max_pool2d(points_feat1, [num_point,1], padding='VALID', scope='maxpool1')
+    # FC
+    pc_feat1 = tf.reshape(pc_feat1, [batch_size, -1])
+    pc_feat1 = tf_util.fully_connected(pc_feat1, 256, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
+    pc_feat1 = tf_util.fully_connected(pc_feat1, 128, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
+    print(pc_feat1)
+
+    # CONCAT
+    pc_feat1_expand = tf.tile(tf.reshape(pc_feat1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
+    points_feat1_concat = tf.concat(axis=3, values=[points_feat1, pc_feat1_expand])
+
+    # CONV
+    net = tf_util.conv2d(points_feat1_concat, 512, [1,1], padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training, scope='conv6')
+    net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
+                         bn=True, is_training=is_training, scope='conv7')
+    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
+    net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
+                         activation_fn=None, scope='conv8')
+    net = tf.squeeze(net, [2])
+
+    return net
+
+def get_loss(pred, label):
+    """ pred: B,N,13
+        label: B,N """
+    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
+    return tf.reduce_mean(input_tensor=loss)
+
+if __name__ == "__main__":
+    with tf.Graph().as_default():
+        a = tf.compat.v1.placeholder(tf.float32, shape=(32,4096,9))
+        net = get_model(a, tf.constant(True))
+        with tf.compat.v1.Session() as sess:
+            init = tf.compat.v1.global_variables_initializer()
+            sess.run(init)
+            start = time.time()
+            for i in range(100):
+                print(i)
+                sess.run(net, feed_dict={a:np.random.rand(32,4096,9)})
+            print(time.time() - start)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py
new file mode 100644
index 000000000..dace8d03b
--- /dev/null
+++ 
b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/sem_seg/train.py
@@ -0,0 +1,306 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import math
+import h5py
+import numpy as np
+import tensorflow as tf
+import socket
+
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ROOT_DIR = os.path.dirname(BASE_DIR)
+sys.path.append(BASE_DIR)
+sys.path.append(ROOT_DIR)
+sys.path.append(os.path.join(ROOT_DIR, 'utils'))
+import provider
+import tf_util
+from model import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
+parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
+parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
+parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
+parser.add_argument('--batch_size', type=int, default=24, help='Batch Size during training [default: 24]')
+parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
+parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
+parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
+parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
+parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
+parser.add_argument('--test_area', type=int, default=6, help='Which area to use for test, option: 1-6 [default: 6]')
+FLAGS = parser.parse_args()
+
+
+BATCH_SIZE = FLAGS.batch_size
+NUM_POINT = FLAGS.num_point
+MAX_EPOCH = FLAGS.max_epoch
+BASE_LEARNING_RATE = FLAGS.learning_rate
+GPU_INDEX = FLAGS.gpu
+MOMENTUM = FLAGS.momentum
+OPTIMIZER = FLAGS.optimizer
+DECAY_STEP = FLAGS.decay_step
+DECAY_RATE = FLAGS.decay_rate
+
+LOG_DIR = FLAGS.log_dir
+if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
+os.system('cp model.py %s' % (LOG_DIR)) # bkp of model def
+os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
+LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
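+# Record the parsed hyperparameters at the head of the training log so every
+# run is reproducible from its log file alone.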
+LOG_FOUT.write(str(FLAGS)+'\n') + +MAX_NUM_POINT = 4096 +NUM_CLASSES = 13 + +BN_INIT_DECAY = 0.5 +BN_DECAY_DECAY_RATE = 0.5 +#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2) +BN_DECAY_DECAY_STEP = float(DECAY_STEP) +BN_DECAY_CLIP = 0.99 + +HOSTNAME = socket.gethostname() + +ALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data/all_files.txt') +room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')] + +# Load ALL data +data_batch_list = [] +label_batch_list = [] +for h5_filename in ALL_FILES: + data_batch, label_batch = provider.loadDataFile(h5_filename) + data_batch_list.append(data_batch) + label_batch_list.append(label_batch) +data_batches = np.concatenate(data_batch_list, 0) +label_batches = np.concatenate(label_batch_list, 0) +print(data_batches.shape) +print(label_batches.shape) + +test_area = 'Area_'+str(FLAGS.test_area) +train_idxs = [] +test_idxs = [] +for i,room_name in enumerate(room_filelist): + if test_area in room_name: + test_idxs.append(i) + else: + train_idxs.append(i) + +train_data = data_batches[train_idxs,...] +train_label = label_batches[train_idxs] +test_data = data_batches[test_idxs,...] +test_label = label_batches[test_idxs] +print(train_data.shape, train_label.shape) +print(test_data.shape, test_label.shape) + + + + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + + +def get_learning_rate(batch): + learning_rate = tf.compat.v1.train.exponential_decay( + BASE_LEARNING_RATE, # Base learning rate. + batch * BATCH_SIZE, # Current index into the dataset. + DECAY_STEP, # Decay step. + DECAY_RATE, # Decay rate. + staircase=True) + learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!! + return learning_rate + +def get_bn_decay(batch): + bn_momentum = tf.compat.v1.train.exponential_decay( + BN_INIT_DECAY, + batch*BATCH_SIZE, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + return bn_decay + +def train(): + with tf.Graph().as_default(): + with tf.device('/gpu:'+str(GPU_INDEX)): + pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.compat.v1.placeholder(tf.bool, shape=()) + + # Note the global_step=batch parameter to minimize. + # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains. + batch = tf.Variable(0) + bn_decay = get_bn_decay(batch) + tf.compat.v1.summary.scalar('bn_decay', bn_decay) + + # Get model and loss + pred = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) + loss = get_loss(pred, labels_pl) + tf.compat.v1.summary.scalar('loss', loss) + + correct = tf.equal(tf.argmax(input=pred, axis=2), tf.cast(labels_pl, dtype=tf.int64)) + accuracy = tf.reduce_sum(input_tensor=tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT) + tf.compat.v1.summary.scalar('accuracy', accuracy) + + # Get training operator + learning_rate = get_learning_rate(batch) + tf.compat.v1.summary.scalar('learning_rate', learning_rate) + if OPTIMIZER == 'momentum': + optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) + elif OPTIMIZER == 'adam': + optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate) + train_op = optimizer.minimize(loss, global_step=batch) + + # Add ops to save and restore all the variables. 
+ saver = tf.compat.v1.train.Saver() + + # Create a session + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = True + sess = tf.compat.v1.Session(config=config) + + # Add summary writers + merged = tf.compat.v1.summary.merge_all() + train_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'train'), + sess.graph) + test_writer = tf.compat.v1.summary.FileWriter(os.path.join(LOG_DIR, 'test')) + + # Init variables + init = tf.compat.v1.global_variables_initializer() + sess.run(init, {is_training_pl:True}) + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': loss, + 'train_op': train_op, + 'merged': merged, + 'step': batch} + + for epoch in range(MAX_EPOCH): + log_string('**** EPOCH %03d ****' % (epoch)) + sys.stdout.flush() + + train_one_epoch(sess, ops, train_writer) + eval_one_epoch(sess, ops, test_writer) + + # Save the variables to disk. + if epoch % 10 == 0: + save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) + log_string("Model saved in file: %s" % save_path) + + + +def train_one_epoch(sess, ops, train_writer): + """ ops: dict mapping from string to tf ops """ + is_training = True + + log_string('----') + current_data, current_label, _ = provider.shuffle_data(train_data[:,0:NUM_POINT,:], train_label) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + + for batch_idx in range(num_batches): + if batch_idx % 100 == 0: + print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches)) + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + + feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training,} + summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']], + feed_dict=feed_dict) + train_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 2) + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + total_correct += correct + total_seen += (BATCH_SIZE*NUM_POINT) + loss_sum += loss_val + + log_string('mean loss: %f' % (loss_sum / float(num_batches))) + log_string('accuracy: %f' % (total_correct / float(total_seen))) + + +def eval_one_epoch(sess, ops, test_writer): + """ ops: dict mapping from string to tf ops """ + is_training = False + total_correct = 0 + total_seen = 0 + loss_sum = 0 + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + + log_string('----') + current_data = test_data[:,0:NUM_POINT,:] + current_label = np.squeeze(test_label) + + file_size = current_data.shape[0] + num_batches = file_size // BATCH_SIZE + + for batch_idx in range(num_batches): + start_idx = batch_idx * BATCH_SIZE + end_idx = (batch_idx+1) * BATCH_SIZE + + feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :], + ops['labels_pl']: current_label[start_idx:end_idx], + ops['is_training_pl']: is_training} + summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']], + feed_dict=feed_dict) + test_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 2) + correct = np.sum(pred_val == current_label[start_idx:end_idx]) + total_correct += correct + total_seen += (BATCH_SIZE*NUM_POINT) + 
loss_sum += (loss_val*BATCH_SIZE)
+            for i in range(start_idx, end_idx):
+                for j in range(NUM_POINT):
+                    l = current_label[i, j]
+                    total_seen_class[l] += 1
+                    total_correct_class[l] += (pred_val[i-start_idx, j] == l)
+
+    log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
+    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
+    # np.float was removed from NumPy; use the explicit float64 dtype instead.
+    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float64))))
+
+
+
+if __name__ == "__main__":
+    train()
+    LOG_FOUT.close()
-- 
Gitee

From d7c08c3e64117685ad13688958b27311de93d7bb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:33:16 +0000
Subject: [PATCH 25/54] =?UTF-8?q?PointNet=5FID2913=5Ffor=5FTensorFlow2.X?=
 =?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 .../models/pointnet_cls.py                    | 136 +++++++++++++++++
 .../models/pointnet_cls_basic.py              | 105 +++++++++++++
 .../models/pointnet_seg.py                    | 143 ++++++++++++++++++
 .../models/transform_nets.py                  | 140 +++++++++++++++++
 4 files changed, 524 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py
 create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py
 create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py
 create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py

diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py
new file mode 100644
index 000000000..0922464e0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls.py
@@ -0,0 +1,136 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
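+#
+# Loss sketch (matches get_loss below): softmax cross-entropy plus a
+# regularizer that pushes the learned KxK feature transform A (K=64) toward
+# an orthogonal matrix,
+#
+#     L = CE(pred, label) + reg_weight * ||A A^T - I||_F^2 / 2
+#
+# where the factor 1/2 comes from tf.nn.l2_loss(t) = sum(t**2) / 2.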
+#
+import tensorflow as tf
+import numpy as np
+import math
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+from transform_nets import input_transform_net, feature_transform_net

+def placeholder_inputs(batch_size, num_point):
+ # placeholder() creates a graph input; like a function argument in TensorFlow,
+ # a value must be fed for it at run time. dtype: data type. shape: tensor shape.
+ pointclouds_pl = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, num_point, 3))
+ labels_pl = tf.compat.v1.placeholder(tf.int32, shape=(batch_size))
+ return pointclouds_pl, labels_pl
+
+
+def get_model(point_cloud, is_training, bn_decay=None):
+ """ Classification PointNet, input is BxNx3, output Bx40 """
+ # Read back the shapes configured by the placeholder function above
+ # batch_size = point_cloud.get_shape()[0].value
+ batch_size = point_cloud.get_shape()[0]
+ num_point = point_cloud.get_shape()[1]
+ end_points = {}
+
+ # tf.variable_scope lets variables in different name scopes share the same name,
+ # whether they were created with tf.get_variable or tf.Variable
+ with tf.compat.v1.variable_scope('transform_net1') as sc:
+ # point_cloud is a 32x1024x3 tensor; is_training is a bool tensor of
+ # unspecified shape; bn_decay is the batch-norm decay, which ramps up
+ # exponentially over the course of training; K is the transform net's
+ # kernel dimension (3xK), so the returned transform is a 32x3x3 tensor.
+ transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
+ # Multiply matrix a by matrix b, producing a * b
+ point_cloud_transformed = tf.matmul(point_cloud, transform)
+ # Add one dimension to the tensor shape; -1 appends it after the last axis
+ input_image = tf.expand_dims(point_cloud_transformed, -1)
+
+ net = tf_util.conv2d(input_image, 64, [1,3],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv2', bn_decay=bn_decay)
+
+ with tf.compat.v1.variable_scope('transform_net2') as sc:
+ transform = feature_transform_net(net, is_training, bn_decay, K=64)
+ end_points['transform'] = transform
+ net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
+ net_transformed = tf.expand_dims(net_transformed, [2])
+
+ net = tf_util.conv2d(net_transformed, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv3', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv4', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv5', bn_decay=bn_decay)
+
+ # Symmetric function: max pooling
+ net = tf_util.max_pool2d(net, [num_point,1],
+ padding='VALID', scope='maxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
+ scope='fc1', bn_decay=bn_decay)
+ net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
+ scope='dp1')
+ net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
+ scope='fc2', bn_decay=bn_decay)
+ net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
+ scope='dp2')
+ net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
+
+ return net, end_points
+
+
+def get_loss(pred, label, end_points, reg_weight=0.001):
+ """ pred: B*NUM_CLASSES,
+ label: B, """
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
+ classify_loss = tf.reduce_mean(input_tensor=loss)
+ tf.compat.v1.summary.scalar('classify loss', classify_loss)
+
+ # Enforce the transformation as orthogonal
matrix + transform = end_points['transform'] # BxKxK + K = transform.get_shape()[1] + mat_diff = tf.matmul(transform, tf.transpose(a=transform, perm=[0,2,1])) + mat_diff -= tf.constant(np.eye(K), dtype=tf.float32) + mat_diff_loss = tf.nn.l2_loss(mat_diff) + tf.compat.v1.summary.scalar('mat loss', mat_diff_loss) + + return classify_loss + mat_diff_loss * reg_weight + + +if __name__=='__main__': + with tf.Graph().as_default(): + inputs = tf.zeros((32,1024,3)) + outputs = get_model(inputs, tf.constant(True)) + print(outputs) diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py new file mode 100644 index 000000000..de16d003e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_cls_basic.py @@ -0,0 +1,105 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
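+#
+# pointnet_cls_basic.py below is the vanilla PointNet baseline: the same
+# per-point convolution stack, max pooling and fully connected head as
+# pointnet_cls.py, but without the input/feature transform nets, so its
+# get_loss carries no orthogonality regularizer.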
+#
+import tensorflow as tf
+import numpy as np
+import math
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util

+def placeholder_inputs(batch_size, num_point):
+ pointclouds_pl = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, num_point, 3))
+ labels_pl = tf.compat.v1.placeholder(tf.int32, shape=(batch_size))
+ return pointclouds_pl, labels_pl
+
+
+def get_model(point_cloud, is_training, bn_decay=None):
+ """ Classification PointNet, input is BxNx3, output Bx40 """
+ # TensorShape indexing returns plain ints under TF2, so .value is dropped
+ # here as it already is in pointnet_cls.py
+ batch_size = point_cloud.get_shape()[0]
+ num_point = point_cloud.get_shape()[1]
+ end_points = {}
+ input_image = tf.expand_dims(point_cloud, -1)
+
+ # Point functions (MLP implemented as conv2d)
+ net = tf_util.conv2d(input_image, 64, [1,3],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv2', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv3', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv4', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv5', bn_decay=bn_decay)
+
+ # Symmetric function: max pooling
+ net = tf_util.max_pool2d(net, [num_point,1],
+ padding='VALID', scope='maxpool')
+
+ # MLP on global point cloud vector
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
+ scope='fc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
+ scope='fc2', bn_decay=bn_decay)
+ net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training,
+ scope='dp1')
+ net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
+
+ return net, end_points
+
+
+def get_loss(pred, label, end_points):
+ """ pred: B*NUM_CLASSES,
+ label: B, """
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
+ classify_loss = tf.reduce_mean(input_tensor=loss)
+ tf.compat.v1.summary.scalar('classify loss', classify_loss)
+ return classify_loss
+
+
+if __name__=='__main__':
+ with tf.Graph().as_default():
+ inputs = tf.zeros((32,1024,3))
+ outputs = get_model(inputs, tf.constant(True))
+ print(outputs)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py
new file mode 100644
index 000000000..a872aeb87
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/pointnet_seg.py
@@ -0,0 +1,143 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import math
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+from transform_nets import input_transform_net, feature_transform_net

+def placeholder_inputs(batch_size, num_point):
+ pointclouds_pl = tf.compat.v1.placeholder(tf.float32,
+ shape=(batch_size, num_point, 3))
+ labels_pl = tf.compat.v1.placeholder(tf.int32,
+ shape=(batch_size, num_point))
+ return pointclouds_pl, labels_pl
+
+
+def get_model(point_cloud, is_training, bn_decay=None):
+ """ Segmentation PointNet, input is BxNx3, output BxNx50 """
+ batch_size = point_cloud.get_shape()[0]
+ num_point = point_cloud.get_shape()[1]
+ end_points = {}
+
+ with tf.compat.v1.variable_scope('transform_net1') as sc:
+ transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
+ point_cloud_transformed = tf.matmul(point_cloud, transform)
+ input_image = tf.expand_dims(point_cloud_transformed, -1)
+
+ net = tf_util.conv2d(input_image, 64, [1,3],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv2', bn_decay=bn_decay)
+
+ with tf.compat.v1.variable_scope('transform_net2') as sc:
+ transform = feature_transform_net(net, is_training, bn_decay, K=64)
+ end_points['transform'] = transform
+ net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
+ point_feat = tf.expand_dims(net_transformed, [2])
+ print(point_feat)
+
+ net = tf_util.conv2d(point_feat, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv3', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv4', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv5', bn_decay=bn_decay)
+ global_feat = tf_util.max_pool2d(net, [num_point,1],
+ padding='VALID', scope='maxpool')
+ print(global_feat)
+
+ global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
+ # tf.concat takes the tensor list first and a keyword axis in TF1/TF2;
+ # the old tf.concat(3, [...]) argument order is no longer valid
+ concat_feat = tf.concat([point_feat, global_feat_expand], axis=3)
+ print(concat_feat)
+
+ net = tf_util.conv2d(concat_feat, 512, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv6', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 256, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv7', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True,
is_training=is_training,
+ scope='conv8', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='conv9', bn_decay=bn_decay)
+
+ net = tf_util.conv2d(net, 50, [1,1],
+ padding='VALID', stride=[1,1], activation_fn=None,
+ scope='conv10')
+ net = tf.squeeze(net, [2]) # BxNxC
+
+ return net, end_points
+
+
+def get_loss(pred, label, end_points, reg_weight=0.001):
+ """ pred: BxNxC,
+ label: BxN, """
+ loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
+ classify_loss = tf.reduce_mean(input_tensor=loss)
+ tf.compat.v1.summary.scalar('classify loss', classify_loss)
+
+ # Enforce the transformation as orthogonal matrix
+ transform = end_points['transform'] # BxKxK
+ K = transform.get_shape()[1]
+ mat_diff = tf.matmul(transform, tf.transpose(a=transform, perm=[0,2,1]))
+ mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
+ mat_diff_loss = tf.nn.l2_loss(mat_diff)
+ tf.compat.v1.summary.scalar('mat_loss', mat_diff_loss)
+
+ return classify_loss + mat_diff_loss * reg_weight
+
+
+if __name__=='__main__':
+ with tf.Graph().as_default():
+ inputs = tf.zeros((32,1024,3))
+ outputs = get_model(inputs, tf.constant(True))
+ print(outputs)
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py
new file mode 100644
index 000000000..6b1c560bd
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/models/transform_nets.py
@@ -0,0 +1,140 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
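+#
+# Both transform nets below share one initialization trick: the final fully
+# connected layer's weights start at zero (constant_initializer(0.0)) while
+# its biases start at a flattened identity matrix, so before any training the
+# predicted transform is exactly the KxK identity and the net begins as a no-op.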
+#
+import tensorflow as tf
+import numpy as np
+import sys
+import os
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util

+def input_transform_net(point_cloud, is_training, bn_decay=None, K=3):
+ """ Input (XYZ) Transform Net, input is BxNx3 gray image
+ Return:
+ Transformation matrix of size 3xK """
+ # The first two lines read off the input shape: batch_size = 32, num_point = 1024
+ batch_size = point_cloud.get_shape()[0]
+ num_point = point_cloud.get_shape()[1]
+
+ # Expand the input point cloud by one dimension into a 32x1024x3x1 tensor, input_image
+ input_image = tf.expand_dims(point_cloud, -1)
+
+ # The next three calls build the convolution layers through tf_util.conv2d.
+ # The first conv layer 'tconv1' outputs a tensor of shape [32,1024,1,64]
+ net = tf_util.conv2d(input_image, 64, [1,3],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv1', bn_decay=bn_decay)
+ # The second layer 'tconv2' outputs shape [32,1024,1,128]
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv2', bn_decay=bn_decay)
+ # The third layer 'tconv3' outputs shape [32,1024,1,1024]
+ net = tf_util.conv2d(net, 1024, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv3', bn_decay=bn_decay)
+ # Finally a max-pool layer: 'transform_net1' thus consists of three 2-D conv
+ # layers plus the max-pool layer 'tmaxpool', whose output has shape [32, 1, 1, 1024].
+ # Because h and w are both 1, the 1024 channel values computed for each of the
+ # 32 batch inputs can be taken out and fed to the fully connected layers below.
+ net = tf_util.max_pool2d(net, [num_point,1],
+ padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ # Run net through fully connected layers; afterwards net is a 32x256 tensor
+ net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
+ scope='tfc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
+ scope='tfc2', bn_decay=bn_decay)
+
+ # The remaining ops multiply the fully connected output by a 256x(3*K) weight
+ # (K = 3) and add a bias initialized to [1, 0, 0, 0, 1, 0, 0, 0, 1] (shape 9),
+ # giving a 32x9 tensor; transform is then reshaped into a 32x3x3 tensor used
+ # later to rotate the point cloud.
+ with tf.compat.v1.variable_scope('transform_XYZ') as sc:
+ assert(K==3)
+ weights = tf.compat.v1.get_variable('weights', [256, 3*K],
+ initializer=tf.compat.v1.constant_initializer(0.0),
+ dtype=tf.float32)
+ biases = tf.compat.v1.get_variable('biases', [3*K],
+ initializer=tf.compat.v1.constant_initializer(0.0),
+ dtype=tf.float32)
+ biases = biases + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
+ transform = tf.matmul(net, weights)
+ transform = tf.nn.bias_add(transform, biases)
+
+ transform = tf.reshape(transform, [batch_size, 3, K])
+ return transform
+
+
+def feature_transform_net(inputs, is_training, bn_decay=None, K=64):
+ """ Feature Transform Net, input is BxNx1xK
+ Return:
+ Transformation matrix of size KxK """
+ batch_size = inputs.get_shape()[0]
+ num_point = inputs.get_shape()[1]
+
+ net = tf_util.conv2d(inputs, 64, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv2', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1],
+ padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training,
+ scope='tconv3', bn_decay=bn_decay)
+ net = tf_util.max_pool2d(net, [num_point,1],
+ padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 512, bn=True,
is_training=is_training, + scope='tfc1', bn_decay=bn_decay) + net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, + scope='tfc2', bn_decay=bn_decay) + + with tf.compat.v1.variable_scope('transform_feat') as sc: + weights = tf.compat.v1.get_variable('weights', [256, K*K], + initializer=tf.compat.v1.constant_initializer(0.0), + dtype=tf.float32) + biases = tf.compat.v1.get_variable('biases', [K*K], + initializer=tf.compat.v1.constant_initializer(0.0), + dtype=tf.float32) + biases = biases + tf.constant(np.eye(K).flatten(), dtype=tf.float32) + transform = tf.matmul(net, weights) + transform = tf.nn.bias_add(transform, biases) + + transform = tf.reshape(transform, [batch_size, K, K]) + return transform -- Gitee From b7266bd2ab76d249f868ecfe85760f9fcaa44eb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 06:34:05 +0000 Subject: [PATCH 26/54] PointNet_ID2913_for_TensorFlow2.X --- .../part_seg/download_data.sh | 12 + .../part_seg/pointnet_part_seg.py | 189 ++++++++ .../part_seg/test.py | 299 +++++++++++++ .../part_seg/train.py | 422 ++++++++++++++++++ 4 files changed, 922 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh new file mode 100644 index 000000000..70f78dbf2 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/download_data.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Download original ShapeNetPart dataset (around 1GB) +wget https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_v0.zip +unzip shapenetcore_partanno_v0.zip +rm shapenetcore_partanno_v0.zip + +# Download HDF5 for ShapeNet Part segmentation (around 346MB) +wget https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip +unzip shapenet_part_seg_hdf5_data.zip +rm shapenet_part_seg_hdf5_data.zip + diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py new file mode 100644 index 000000000..ed475cddf --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/pointnet_part_seg.py @@ -0,0 +1,189 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import tensorflow as tf
+import numpy as np
+import math
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.dirname(BASE_DIR))
+sys.path.append(os.path.join(BASE_DIR, '../utils'))
+import tf_util
+
+
+def get_transform_K(inputs, is_training, bn_decay=None, K = 3):
+ """ Transform Net, input is BxNx1xK gray image
+ Return:
+ Transformation matrix of size KxK """
+ batch_size = inputs.get_shape()[0]
+ num_point = inputs.get_shape()[1]
+
+ net = tf_util.conv2d(inputs, 256, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv2', bn_decay=bn_decay)
+ net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
+
+ with tf.compat.v1.variable_scope('transform_feat') as sc:
+ weights = tf.compat.v1.get_variable('weights', [256, K*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32)
+ biases = tf.compat.v1.get_variable('biases', [K*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32) + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
+ transform = tf.matmul(net, weights)
+ transform = tf.nn.bias_add(transform, biases)
+
+ #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
+ transform = tf.reshape(transform, [batch_size, K, K])
+ return transform
+
+
+
+
+
+def get_transform(point_cloud, is_training, bn_decay=None, K = 3):
+ """ Transform Net, input is BxNx3 gray image
+ Return:
+ Transformation matrix of size 3xK """
+ batch_size = point_cloud.get_shape()[0]
+ num_point = point_cloud.get_shape()[1]
+
+ input_image = tf.expand_dims(point_cloud, -1)
+ net = tf_util.conv2d(input_image, 64, [1,3], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
+ net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
+ bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
+ net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')
+
+ net = tf.reshape(net, [batch_size, -1])
+ net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
+ net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
+
+ with
tf.compat.v1.variable_scope('transform_XYZ') as sc: + assert(K==3) + weights = tf.compat.v1.get_variable('weights', [128, 3*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32) + biases = tf.compat.v1.get_variable('biases', [3*K], initializer=tf.compat.v1.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32) + transform = tf.matmul(net, weights) + transform = tf.nn.bias_add(transform, biases) + + #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3') + transform = tf.reshape(transform, [batch_size, 3, K]) + return transform + + +def get_model(point_cloud, input_label, is_training, cat_num, part_num, \ + batch_size, num_point, weight_decay, bn_decay=None): + """ ConvNet baseline, input is BxNx3 gray image """ + end_points = {} + + with tf.compat.v1.variable_scope('transform_net1') as sc: + K = 3 + transform = get_transform(point_cloud, is_training, bn_decay, K = 3) + point_cloud_transformed = tf.matmul(point_cloud, transform) + + input_image = tf.expand_dims(point_cloud_transformed, -1) + out1 = tf_util.conv2d(input_image, 64, [1,K], padding='VALID', stride=[1,1], + bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay) + out2 = tf_util.conv2d(out1, 128, [1,1], padding='VALID', stride=[1,1], + bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay) + out3 = tf_util.conv2d(out2, 128, [1,1], padding='VALID', stride=[1,1], + bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay) + + + with tf.compat.v1.variable_scope('transform_net2') as sc: + K = 128 + transform = get_transform_K(out3, is_training, bn_decay, K) + + end_points['transform'] = transform + + squeezed_out3 = tf.reshape(out3, [batch_size, num_point, 128]) + net_transformed = tf.matmul(squeezed_out3, transform) + net_transformed = tf.expand_dims(net_transformed, [2]) + + out4 = tf_util.conv2d(net_transformed, 512, [1,1], padding='VALID', stride=[1,1], + bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay) + out5 = tf_util.conv2d(out4, 2048, [1,1], padding='VALID', stride=[1,1], + bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay) + out_max = tf_util.max_pool2d(out5, [num_point,1], padding='VALID', scope='maxpool') + + # classification network + net = tf.reshape(out_max, [batch_size, -1]) + net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc1', bn_decay=bn_decay) + net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='cla/fc2', bn_decay=bn_decay) + net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='cla/dp1') + net = tf_util.fully_connected(net, cat_num, activation_fn=None, scope='cla/fc3') + + # segmentation network + one_hot_label_expand = tf.reshape(input_label, [batch_size, 1, 1, cat_num]) + out_max = tf.concat(axis=3, values=[out_max, one_hot_label_expand]) + + expand = tf.tile(out_max, [1, num_point, 1, 1]) + concat = tf.concat(axis=3, values=[expand, out1, out2, out3, out4, out5]) + + net2 = tf_util.conv2d(concat, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay, + bn=True, is_training=is_training, scope='seg/conv1', weight_decay=weight_decay) + net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, scope='seg/dp1') + net2 = tf_util.conv2d(net2, 256, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay, + bn=True, is_training=is_training, scope='seg/conv2', weight_decay=weight_decay) + net2 = tf_util.dropout(net2, keep_prob=0.8, is_training=is_training, 
scope='seg/dp2')
+ net2 = tf_util.conv2d(net2, 128, [1,1], padding='VALID', stride=[1,1], bn_decay=bn_decay,
+ bn=True, is_training=is_training, scope='seg/conv3', weight_decay=weight_decay)
+ net2 = tf_util.conv2d(net2, part_num, [1,1], padding='VALID', stride=[1,1], activation_fn=None,
+ bn=False, scope='seg/conv4', weight_decay=weight_decay)
+
+ net2 = tf.reshape(net2, [batch_size, num_point, part_num])
+
+ return net, net2, end_points

+def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
+ per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
+ label_loss = tf.reduce_mean(input_tensor=per_instance_label_loss)
+
+ # size of seg_pred is batch_size x point_num x part_cat_num
+ # size of seg is batch_size x point_num
+ per_instance_seg_loss = tf.reduce_mean(input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
+ seg_loss = tf.reduce_mean(input_tensor=per_instance_seg_loss)
+
+ per_instance_seg_pred_res = tf.argmax(input=seg_pred, axis=2)
+
+ # Enforce the transformation as orthogonal matrix
+ transform = end_points['transform'] # BxKxK
+ K = transform.get_shape()[1]
+ mat_diff = tf.matmul(transform, tf.transpose(a=transform, perm=[0,2,1])) - tf.constant(np.eye(K), dtype=tf.float32)
+ mat_diff_loss = tf.nn.l2_loss(mat_diff)
+
+
+ total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3
+
+ return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
+
diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py
new file mode 100644
index 000000000..5c2d5f35c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/test.py
@@ -0,0 +1,299 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import argparse +import tensorflow as tf +import json +import numpy as np +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sys.path.append(os.path.dirname(BASE_DIR)) +import provider +import pointnet_part_seg as model + +parser = argparse.ArgumentParser() +parser.add_argument('--model_path', default='train_results/trained_models/epoch_190.ckpt', help='Model checkpoint path') +FLAGS = parser.parse_args() + + +# DEFAULT SETTINGS +pretrained_model_path = FLAGS.model_path # os.path.join(BASE_DIR, './pretrained_model/model.ckpt') +hdf5_data_dir = os.path.join(BASE_DIR, './hdf5_data') +ply_data_dir = os.path.join(BASE_DIR, './PartAnnotation') +gpu_to_use = 0 +output_dir = os.path.join(BASE_DIR, './test_results') +output_verbose = True # If true, output all color-coded part segmentation obj files + +# MAIN SCRIPT +point_num = 3000 # the max number of points in the all testing data shapes +batch_size = 1 + +test_file_list = os.path.join(BASE_DIR, 'testing_ply_file_list.txt') + +oid2cpid = json.load(open(os.path.join(hdf5_data_dir, 'overallid_to_catid_partid.json'), 'r')) + +object2setofoid = {} +for idx in range(len(oid2cpid)): + objid, pid = oid2cpid[idx] + if not objid in object2setofoid.keys(): + object2setofoid[objid] = [] + object2setofoid[objid].append(idx) + +all_obj_cat_file = os.path.join(hdf5_data_dir, 'all_object_categories.txt') +fin = open(all_obj_cat_file, 'r') +lines = [line.rstrip() for line in fin.readlines()] +objcats = [line.split()[1] for line in lines] +objnames = [line.split()[0] for line in lines] +on2oid = {objcats[i]:i for i in range(len(objcats))} +fin.close() + +color_map_file = os.path.join(hdf5_data_dir, 'part_color_mapping.json') +color_map = json.load(open(color_map_file, 'r')) + +NUM_OBJ_CATS = 16 +NUM_PART_CATS = 50 + +cpid2oid = json.load(open(os.path.join(hdf5_data_dir, 'catid_partid_to_overallid.json'), 'r')) + +def printout(flog, data): + print(data) + flog.write(data + '\n') + +def output_color_point_cloud(data, seg, out_file): + with open(out_file, 'w') as f: + l = len(seg) + for i in range(l): + color = color_map[seg[i]] + f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2])) + +def output_color_point_cloud_red_blue(data, seg, out_file): + with open(out_file, 'w') as f: + l = len(seg) + for i in range(l): + if seg[i] == 1: + color = [0, 0, 1] + elif seg[i] == 0: + color = [1, 0, 0] + else: + color = [0, 0, 0] + + f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2])) + + +def pc_normalize(pc): + l = pc.shape[0] + centroid = np.mean(pc, axis=0) + pc = pc - centroid + m = np.max(np.sqrt(np.sum(pc**2, axis=1))) + pc = pc / m + return pc + +def placeholder_inputs(): + pointclouds_ph = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, point_num, 3)) + input_label_ph = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, NUM_OBJ_CATS)) + return pointclouds_ph, input_label_ph + +def output_color_point_cloud(data, seg, out_file): + with open(out_file, 'w') as f: + l = len(seg) + for i in range(l): + color = color_map[seg[i]] + f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2])) + +def load_pts_seg_files(pts_file, seg_file, catid): + with open(pts_file, 'r') as f: + pts_str = [item.rstrip() for item in f.readlines()] + pts = np.array([np.float32(s.split()) for s in pts_str], dtype=np.float32) + with open(seg_file, 'r') as f: + part_ids = 
np.array([int(item.rstrip()) for item in f.readlines()], dtype=np.uint8) + seg = np.array([cpid2oid[catid+'_'+str(x)] for x in part_ids]) + return pts, seg + +def pc_augment_to_point_num(pts, pn): + assert(pts.shape[0] <= pn) + cur_len = pts.shape[0] + res = np.array(pts) + while cur_len < pn: + res = np.concatenate((res, pts)) + cur_len += pts.shape[0] + return res[:pn, :] + +def convert_label_to_one_hot(labels): + label_one_hot = np.zeros((labels.shape[0], NUM_OBJ_CATS)) + for idx in range(labels.shape[0]): + label_one_hot[idx, labels[idx]] = 1 + return label_one_hot + +def predict(): + is_training = False + + with tf.device('/gpu:'+str(gpu_to_use)): + pointclouds_ph, input_label_ph = placeholder_inputs() + is_training_ph = tf.compat.v1.placeholder(tf.bool, shape=()) + + # simple model + pred, seg_pred, end_points = model.get_model(pointclouds_ph, input_label_ph, \ + cat_num=NUM_OBJ_CATS, part_num=NUM_PART_CATS, is_training=is_training_ph, \ + batch_size=batch_size, num_point=point_num, weight_decay=0.0, bn_decay=None) + + # Add ops to save and restore all the variables. + saver = tf.compat.v1.train.Saver() + + # Later, launch the model, use the saver to restore variables from disk, and + # do some work with the model. + + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + + with tf.compat.v1.Session(config=config) as sess: + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + flog = open(os.path.join(output_dir, 'log.txt'), 'w') + + # Restore variables from disk. + printout(flog, 'Loading model %s' % pretrained_model_path) + saver.restore(sess, pretrained_model_path) + printout(flog, 'Model restored.') + + # Note: the evaluation for the model with BN has to have some statistics + # Using some test datas as the statistics + batch_data = np.zeros([batch_size, point_num, 3]).astype(np.float32) + + total_acc = 0.0 + total_seen = 0 + total_acc_iou = 0.0 + + total_per_cat_acc = np.zeros((NUM_OBJ_CATS)).astype(np.float32) + total_per_cat_iou = np.zeros((NUM_OBJ_CATS)).astype(np.float32) + total_per_cat_seen = np.zeros((NUM_OBJ_CATS)).astype(np.int32) + + ffiles = open(test_file_list, 'r') + lines = [line.rstrip() for line in ffiles.readlines()] + pts_files = [line.split()[0] for line in lines] + seg_files = [line.split()[1] for line in lines] + labels = [line.split()[2] for line in lines] + ffiles.close() + + len_pts_files = len(pts_files) + for shape_idx in range(len_pts_files): + if shape_idx % 100 == 0: + printout(flog, '%d/%d ...' % (shape_idx, len_pts_files)) + + cur_gt_label = on2oid[labels[shape_idx]] + + cur_label_one_hot = np.zeros((1, NUM_OBJ_CATS), dtype=np.float32) + cur_label_one_hot[0, cur_gt_label] = 1 + + pts_file_to_load = os.path.join(ply_data_dir, pts_files[shape_idx]) + seg_file_to_load = os.path.join(ply_data_dir, seg_files[shape_idx]) + + pts, seg = load_pts_seg_files(pts_file_to_load, seg_file_to_load, objcats[cur_gt_label]) + ori_point_num = len(seg) + + batch_data[0, ...] = pc_augment_to_point_num(pc_normalize(pts), point_num) + + label_pred_val, seg_pred_res = sess.run([pred, seg_pred], feed_dict={ + pointclouds_ph: batch_data, + input_label_ph: cur_label_one_hot, + is_training_ph: is_training, + }) + + label_pred_val = np.argmax(label_pred_val[0, :]) + + seg_pred_res = seg_pred_res[0, ...] 
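+ # The block below scores one shape. Predictions are first restricted to the
+ # part ids legal for the ground-truth category (all other logits are pushed
+ # down to mini - 1000 so argmax can never select them); then per-part IoU is
+ # n_intersect / (n_pred + n_gt - n_intersect), counted as 1.0 when the part
+ # occurs in neither prediction nor label (n_union == 0). For example,
+ # n_pred=40, n_gt=50, n_intersect=30 gives IoU = 30 / (40+50-30) = 0.5; the
+ # shape's score avg_iou is the mean over its category's part ids (iou_oids).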
+ + iou_oids = object2setofoid[objcats[cur_gt_label]] + non_cat_labels = list(set(np.arange(NUM_PART_CATS)).difference(set(iou_oids))) + + mini = np.min(seg_pred_res) + seg_pred_res[:, non_cat_labels] = mini - 1000 + + seg_pred_val = np.argmax(seg_pred_res, axis=1)[:ori_point_num] + + seg_acc = np.mean(seg_pred_val == seg) + + total_acc += seg_acc + total_seen += 1 + + total_per_cat_seen[cur_gt_label] += 1 + total_per_cat_acc[cur_gt_label] += seg_acc + + mask = np.int32(seg_pred_val == seg) + + total_iou = 0.0 + iou_log = '' + for oid in iou_oids: + n_pred = np.sum(seg_pred_val == oid) + n_gt = np.sum(seg == oid) + n_intersect = np.sum(np.int32(seg == oid) * mask) + n_union = n_pred + n_gt - n_intersect + iou_log += '_' + str(n_pred)+'_'+str(n_gt)+'_'+str(n_intersect)+'_'+str(n_union)+'_' + if n_union == 0: + total_iou += 1 + iou_log += '_1\n' + else: + total_iou += n_intersect * 1.0 / n_union + iou_log += '_'+str(n_intersect * 1.0 / n_union)+'\n' + + avg_iou = total_iou / len(iou_oids) + total_acc_iou += avg_iou + total_per_cat_iou[cur_gt_label] += avg_iou + + if output_verbose: + output_color_point_cloud(pts, seg, os.path.join(output_dir, str(shape_idx)+'_gt.obj')) + output_color_point_cloud(pts, seg_pred_val, os.path.join(output_dir, str(shape_idx)+'_pred.obj')) + output_color_point_cloud_red_blue(pts, np.int32(seg == seg_pred_val), + os.path.join(output_dir, str(shape_idx)+'_diff.obj')) + + with open(os.path.join(output_dir, str(shape_idx)+'.log'), 'w') as fout: + fout.write('Total Point: %d\n\n' % ori_point_num) + fout.write('Ground Truth: %s\n' % objnames[cur_gt_label]) + fout.write('Predict: %s\n\n' % objnames[label_pred_val]) + fout.write('Accuracy: %f\n' % seg_acc) + fout.write('IoU: %f\n\n' % avg_iou) + fout.write('IoU details: %s\n' % iou_log) + + printout(flog, 'Accuracy: %f' % (total_acc / total_seen)) + printout(flog, 'IoU: %f' % (total_acc_iou / total_seen)) + + for cat_idx in range(NUM_OBJ_CATS): + printout(flog, '\t ' + objcats[cat_idx] + ' Total Number: ' + str(total_per_cat_seen[cat_idx])) + if total_per_cat_seen[cat_idx] > 0: + printout(flog, '\t ' + objcats[cat_idx] + ' Accuracy: ' + \ + str(total_per_cat_acc[cat_idx] / total_per_cat_seen[cat_idx])) + printout(flog, '\t ' + objcats[cat_idx] + ' IoU: '+ \ + str(total_per_cat_iou[cat_idx] / total_per_cat_seen[cat_idx])) + + +with tf.Graph().as_default(): + predict() diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py new file mode 100644 index 000000000..abff0ead3 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/train.py @@ -0,0 +1,422 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import subprocess
+import tensorflow as tf
+import numpy as np
+from datetime import datetime
+import json
+import os
+import sys
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+sys.path.append(os.path.dirname(BASE_DIR))
+import provider
+import pointnet_part_seg as model

+# DEFAULT SETTINGS
+parser = argparse.ArgumentParser()
+parser.add_argument('--gpu', type=int, default=1, help='GPU to use [default: GPU 1]')
+parser.add_argument('--batch', type=int, default=32, help='Batch Size during training [default: 32]')
+parser.add_argument('--epoch', type=int, default=200, help='Epoch to run [default: 200]')
+parser.add_argument('--point_num', type=int, default=2048, help='Point Number [256/512/1024/2048]')
+parser.add_argument('--output_dir', type=str, default='train_results', help='Directory that stores all training logs and trained models')
+parser.add_argument('--wd', type=float, default=0, help='Weight Decay [Default: 0.0]')
+FLAGS = parser.parse_args()

+hdf5_data_dir = os.path.join(BASE_DIR, './hdf5_data')

+# MAIN SCRIPT
+point_num = FLAGS.point_num
+batch_size = FLAGS.batch
+output_dir = FLAGS.output_dir

+if not os.path.exists(output_dir):
+ os.mkdir(output_dir)

+color_map_file = os.path.join(hdf5_data_dir, 'part_color_mapping.json')
+color_map = json.load(open(color_map_file, 'r'))

+all_obj_cats_file = os.path.join(hdf5_data_dir, 'all_object_categories.txt')
+fin = open(all_obj_cats_file, 'r')
+lines = [line.rstrip() for line in fin.readlines()]
+all_obj_cats = [(line.split()[0], line.split()[1]) for line in lines]
+fin.close()

+all_cats = json.load(open(os.path.join(hdf5_data_dir, 'overallid_to_catid_partid.json'), 'r'))
+NUM_CATEGORIES = 16
+NUM_PART_CATS = len(all_cats)

+print('#### Batch Size: {0}'.format(batch_size))
+print('#### Point Number: {0}'.format(point_num))
+print('#### Training using GPU: {0}'.format(FLAGS.gpu))

+DECAY_STEP = 16881 * 20
+DECAY_RATE = 0.5

+LEARNING_RATE_CLIP = 1e-5

+BN_INIT_DECAY = 0.5
+BN_DECAY_DECAY_RATE = 0.5
+BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
+BN_DECAY_CLIP = 0.99

+BASE_LEARNING_RATE = 0.001
+MOMENTUM = 0.9
+TRAINING_EPOCHES = FLAGS.epoch
+print('### Training epoch: {0}'.format(TRAINING_EPOCHES))

+TRAINING_FILE_LIST = os.path.join(hdf5_data_dir, 'train_hdf5_file_list.txt')
+TESTING_FILE_LIST = os.path.join(hdf5_data_dir, 'val_hdf5_file_list.txt')

+MODEL_STORAGE_PATH = os.path.join(output_dir, 'trained_models')
+if not os.path.exists(MODEL_STORAGE_PATH):
+ os.mkdir(MODEL_STORAGE_PATH)

+LOG_STORAGE_PATH = os.path.join(output_dir, 'logs')
+if not os.path.exists(LOG_STORAGE_PATH):
+ os.mkdir(LOG_STORAGE_PATH)

+SUMMARIES_FOLDER = os.path.join(output_dir, 'summaries')
+if not os.path.exists(SUMMARIES_FOLDER):
+ os.mkdir(SUMMARIES_FOLDER)

+def printout(flog, data):
+ print(data)
+
flog.write(data + '\n') + +def placeholder_inputs(): + pointclouds_ph = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, point_num, 3)) + input_label_ph = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, NUM_CATEGORIES)) + labels_ph = tf.compat.v1.placeholder(tf.int32, shape=(batch_size)) + seg_ph = tf.compat.v1.placeholder(tf.int32, shape=(batch_size, point_num)) + return pointclouds_ph, input_label_ph, labels_ph, seg_ph + +def convert_label_to_one_hot(labels): + label_one_hot = np.zeros((labels.shape[0], NUM_CATEGORIES)) + for idx in range(labels.shape[0]): + label_one_hot[idx, labels[idx]] = 1 + return label_one_hot + +def train(): + with tf.Graph().as_default(): + with tf.device('/gpu:'+str(FLAGS.gpu)): + pointclouds_ph, input_label_ph, labels_ph, seg_ph = placeholder_inputs() + is_training_ph = tf.compat.v1.placeholder(tf.bool, shape=()) + + batch = tf.Variable(0, trainable=False) + learning_rate = tf.compat.v1.train.exponential_decay( + BASE_LEARNING_RATE, # base learning rate + batch * batch_size, # global_var indicating the number of steps + DECAY_STEP, # step size + DECAY_RATE, # decay rate + staircase=True # Stair-case or continuous decreasing + ) + learning_rate = tf.maximum(learning_rate, LEARNING_RATE_CLIP) + + bn_momentum = tf.compat.v1.train.exponential_decay( + BN_INIT_DECAY, + batch*batch_size, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + + lr_op = tf.compat.v1.summary.scalar('learning_rate', learning_rate) + batch_op = tf.compat.v1.summary.scalar('batch_number', batch) + bn_decay_op = tf.compat.v1.summary.scalar('bn_decay', bn_decay) + + labels_pred, seg_pred, end_points = model.get_model(pointclouds_ph, input_label_ph, \ + is_training=is_training_ph, bn_decay=bn_decay, cat_num=NUM_CATEGORIES, \ + part_num=NUM_PART_CATS, batch_size=batch_size, num_point=point_num, weight_decay=FLAGS.wd) + + # model.py defines both classification net and segmentation net, which share the common global feature extractor network. + # In model.get_loss, we define the total loss to be weighted sum of the classification and segmentation losses. + # Here, we only train for segmentation network. Thus, we set weight to be 1.0. 
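+ # Concretely, model.get_loss in pointnet_part_seg.py computes
+ #   total_loss = weight * seg_loss + (1 - weight) * label_loss + 1e-3 * mat_diff_loss,
+ # so with weight = 1.0 the classification (label) loss contributes nothing
+ # and training is driven by the segmentation loss plus the feature-transform
+ # orthogonality penalty mat_diff_loss.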
+ loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res \ + = model.get_loss(labels_pred, seg_pred, labels_ph, seg_ph, 1.0, end_points) + + total_training_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + total_testing_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + + label_training_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + label_testing_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + + seg_training_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + seg_testing_loss_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + + label_training_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + label_testing_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + label_testing_acc_avg_cat_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + + seg_training_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + seg_testing_acc_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + seg_testing_acc_avg_cat_ph = tf.compat.v1.placeholder(tf.float32, shape=()) + + total_train_loss_sum_op = tf.compat.v1.summary.scalar('total_training_loss', total_training_loss_ph) + total_test_loss_sum_op = tf.compat.v1.summary.scalar('total_testing_loss', total_testing_loss_ph) + + label_train_loss_sum_op = tf.compat.v1.summary.scalar('label_training_loss', label_training_loss_ph) + label_test_loss_sum_op = tf.compat.v1.summary.scalar('label_testing_loss', label_testing_loss_ph) + + seg_train_loss_sum_op = tf.compat.v1.summary.scalar('seg_training_loss', seg_training_loss_ph) + seg_test_loss_sum_op = tf.compat.v1.summary.scalar('seg_testing_loss', seg_testing_loss_ph) + + label_train_acc_sum_op = tf.compat.v1.summary.scalar('label_training_acc', label_training_acc_ph) + label_test_acc_sum_op = tf.compat.v1.summary.scalar('label_testing_acc', label_testing_acc_ph) + label_test_acc_avg_cat_op = tf.compat.v1.summary.scalar('label_testing_acc_avg_cat', label_testing_acc_avg_cat_ph) + + seg_train_acc_sum_op = tf.compat.v1.summary.scalar('seg_training_acc', seg_training_acc_ph) + seg_test_acc_sum_op = tf.compat.v1.summary.scalar('seg_testing_acc', seg_testing_acc_ph) + seg_test_acc_avg_cat_op = tf.compat.v1.summary.scalar('seg_testing_acc_avg_cat', seg_testing_acc_avg_cat_ph) + + train_variables = tf.compat.v1.trainable_variables() + + trainer = tf.compat.v1.train.AdamOptimizer(learning_rate) + train_op = trainer.minimize(loss, var_list=train_variables, global_step=batch) + + saver = tf.compat.v1.train.Saver() + + config = tf.compat.v1.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + sess = tf.compat.v1.Session(config=config) + + init = tf.compat.v1.global_variables_initializer() + sess.run(init) + + train_writer = tf.compat.v1.summary.FileWriter(SUMMARIES_FOLDER + '/train', sess.graph) + test_writer = tf.compat.v1.summary.FileWriter(SUMMARIES_FOLDER + '/test') + + train_file_list = provider.getDataFiles(TRAINING_FILE_LIST) + num_train_file = len(train_file_list) + test_file_list = provider.getDataFiles(TESTING_FILE_LIST) + num_test_file = len(test_file_list) + + fcmd = open(os.path.join(LOG_STORAGE_PATH, 'cmd.txt'), 'w') + fcmd.write(str(FLAGS)) + fcmd.close() + + # write logs to the disk + flog = open(os.path.join(LOG_STORAGE_PATH, 'log.txt'), 'w') + + def train_one_epoch(train_file_idx, epoch_num): + is_training = True + + for i in range(num_train_file): + cur_train_filename = os.path.join(hdf5_data_dir, train_file_list[train_file_idx[i]]) + printout(flog, 'Loading 
train file ' + cur_train_filename) + + cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(cur_train_filename) + cur_data, cur_labels, order = provider.shuffle_data(cur_data, np.squeeze(cur_labels)) + cur_seg = cur_seg[order, ...] + + cur_labels_one_hot = convert_label_to_one_hot(cur_labels) + + num_data = len(cur_labels) + num_batch = num_data // batch_size + + total_loss = 0.0 + total_label_loss = 0.0 + total_seg_loss = 0.0 + total_label_acc = 0.0 + total_seg_acc = 0.0 + + for j in range(num_batch): + begidx = j * batch_size + endidx = (j + 1) * batch_size + + feed_dict = { + pointclouds_ph: cur_data[begidx: endidx, ...], + labels_ph: cur_labels[begidx: endidx, ...], + input_label_ph: cur_labels_one_hot[begidx: endidx, ...], + seg_ph: cur_seg[begidx: endidx, ...], + is_training_ph: is_training, + } + + _, loss_val, label_loss_val, seg_loss_val, per_instance_label_loss_val, \ + per_instance_seg_loss_val, label_pred_val, seg_pred_val, pred_seg_res \ + = sess.run([train_op, loss, label_loss, seg_loss, per_instance_label_loss, \ + per_instance_seg_loss, labels_pred, seg_pred, per_instance_seg_pred_res], \ + feed_dict=feed_dict) + + per_instance_part_acc = np.mean(pred_seg_res == cur_seg[begidx: endidx, ...], axis=1) + average_part_acc = np.mean(per_instance_part_acc) + + total_loss += loss_val + total_label_loss += label_loss_val + total_seg_loss += seg_loss_val + + per_instance_label_pred = np.argmax(label_pred_val, axis=1) + total_label_acc += np.mean(np.float32(per_instance_label_pred == cur_labels[begidx: endidx, ...])) + total_seg_acc += average_part_acc + + total_loss = total_loss * 1.0 / num_batch + total_label_loss = total_label_loss * 1.0 / num_batch + total_seg_loss = total_seg_loss * 1.0 / num_batch + total_label_acc = total_label_acc * 1.0 / num_batch + total_seg_acc = total_seg_acc * 1.0 / num_batch + + lr_sum, bn_decay_sum, batch_sum, train_loss_sum, train_label_acc_sum, \ + train_label_loss_sum, train_seg_loss_sum, train_seg_acc_sum = sess.run(\ + [lr_op, bn_decay_op, batch_op, total_train_loss_sum_op, label_train_acc_sum_op, \ + label_train_loss_sum_op, seg_train_loss_sum_op, seg_train_acc_sum_op], \ + feed_dict={total_training_loss_ph: total_loss, label_training_loss_ph: total_label_loss, \ + seg_training_loss_ph: total_seg_loss, label_training_acc_ph: total_label_acc, \ + seg_training_acc_ph: total_seg_acc}) + + train_writer.add_summary(train_loss_sum, i + epoch_num * num_train_file) + train_writer.add_summary(train_label_loss_sum, i + epoch_num * num_train_file) + train_writer.add_summary(train_seg_loss_sum, i + epoch_num * num_train_file) + train_writer.add_summary(lr_sum, i + epoch_num * num_train_file) + train_writer.add_summary(bn_decay_sum, i + epoch_num * num_train_file) + train_writer.add_summary(train_label_acc_sum, i + epoch_num * num_train_file) + train_writer.add_summary(train_seg_acc_sum, i + epoch_num * num_train_file) + train_writer.add_summary(batch_sum, i + epoch_num * num_train_file) + + printout(flog, '\tTraining Total Mean_loss: %f' % total_loss) + printout(flog, '\t\tTraining Label Mean_loss: %f' % total_label_loss) + printout(flog, '\t\tTraining Label Accuracy: %f' % total_label_acc) + printout(flog, '\t\tTraining Seg Mean_loss: %f' % total_seg_loss) + printout(flog, '\t\tTraining Seg Accuracy: %f' % total_seg_acc) + + def eval_one_epoch(epoch_num): + is_training = False + + total_loss = 0.0 + total_label_loss = 0.0 + total_seg_loss = 0.0 + total_label_acc = 0.0 + total_seg_acc = 0.0 + total_seen = 0 + + total_label_acc_per_cat = 
np.zeros((NUM_CATEGORIES)).astype(np.float32) + total_seg_acc_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.float32) + total_seen_per_cat = np.zeros((NUM_CATEGORIES)).astype(np.int32) + + for i in range(num_test_file): + cur_test_filename = os.path.join(hdf5_data_dir, test_file_list[i]) + printout(flog, 'Loading test file ' + cur_test_filename) + + cur_data, cur_labels, cur_seg = provider.loadDataFile_with_seg(cur_test_filename) + cur_labels = np.squeeze(cur_labels) + + cur_labels_one_hot = convert_label_to_one_hot(cur_labels) + + num_data = len(cur_labels) + num_batch = num_data // batch_size + + for j in range(num_batch): + begidx = j * batch_size + endidx = (j + 1) * batch_size + feed_dict = { + pointclouds_ph: cur_data[begidx: endidx, ...], + labels_ph: cur_labels[begidx: endidx, ...], + input_label_ph: cur_labels_one_hot[begidx: endidx, ...], + seg_ph: cur_seg[begidx: endidx, ...], + is_training_ph: is_training, + } + + loss_val, label_loss_val, seg_loss_val, per_instance_label_loss_val, \ + per_instance_seg_loss_val, label_pred_val, seg_pred_val, pred_seg_res \ + = sess.run([loss, label_loss, seg_loss, per_instance_label_loss, \ + per_instance_seg_loss, labels_pred, seg_pred, per_instance_seg_pred_res], \ + feed_dict=feed_dict) + + per_instance_part_acc = np.mean(pred_seg_res == cur_seg[begidx: endidx, ...], axis=1) + average_part_acc = np.mean(per_instance_part_acc) + + total_seen += 1 + total_loss += loss_val + total_label_loss += label_loss_val + total_seg_loss += seg_loss_val + + per_instance_label_pred = np.argmax(label_pred_val, axis=1) + total_label_acc += np.mean(np.float32(per_instance_label_pred == cur_labels[begidx: endidx, ...])) + total_seg_acc += average_part_acc + + for shape_idx in range(begidx, endidx): + total_seen_per_cat[cur_labels[shape_idx]] += 1 + total_label_acc_per_cat[cur_labels[shape_idx]] += np.int32(per_instance_label_pred[shape_idx-begidx] == cur_labels[shape_idx]) + total_seg_acc_per_cat[cur_labels[shape_idx]] += per_instance_part_acc[shape_idx - begidx] + + total_loss = total_loss * 1.0 / total_seen + total_label_loss = total_label_loss * 1.0 / total_seen + total_seg_loss = total_seg_loss * 1.0 / total_seen + total_label_acc = total_label_acc * 1.0 / total_seen + total_seg_acc = total_seg_acc * 1.0 / total_seen + + test_loss_sum, test_label_acc_sum, test_label_loss_sum, test_seg_loss_sum, test_seg_acc_sum = sess.run(\ + [total_test_loss_sum_op, label_test_acc_sum_op, label_test_loss_sum_op, seg_test_loss_sum_op, seg_test_acc_sum_op], \ + feed_dict={total_testing_loss_ph: total_loss, label_testing_loss_ph: total_label_loss, \ + seg_testing_loss_ph: total_seg_loss, label_testing_acc_ph: total_label_acc, seg_testing_acc_ph: total_seg_acc}) + + test_writer.add_summary(test_loss_sum, (epoch_num+1) * num_train_file-1) + test_writer.add_summary(test_label_loss_sum, (epoch_num+1) * num_train_file-1) + test_writer.add_summary(test_seg_loss_sum, (epoch_num+1) * num_train_file-1) + test_writer.add_summary(test_label_acc_sum, (epoch_num+1) * num_train_file-1) + test_writer.add_summary(test_seg_acc_sum, (epoch_num+1) * num_train_file-1) + + printout(flog, '\tTesting Total Mean_loss: %f' % total_loss) + printout(flog, '\t\tTesting Label Mean_loss: %f' % total_label_loss) + printout(flog, '\t\tTesting Label Accuracy: %f' % total_label_acc) + printout(flog, '\t\tTesting Seg Mean_loss: %f' % total_seg_loss) + printout(flog, '\t\tTesting Seg Accuracy: %f' % total_seg_acc) + + for cat_idx in range(NUM_CATEGORIES): + if total_seen_per_cat[cat_idx] > 0: + printout(flog, 
'\n\t\tCategory %s Object Number: %d' % (all_obj_cats[cat_idx][0], total_seen_per_cat[cat_idx]))
+                printout(flog, '\t\tCategory %s Label Accuracy: %f' % (all_obj_cats[cat_idx][0], total_label_acc_per_cat[cat_idx]/total_seen_per_cat[cat_idx]))
+                printout(flog, '\t\tCategory %s Seg Accuracy: %f' % (all_obj_cats[cat_idx][0], total_seg_acc_per_cat[cat_idx]/total_seen_per_cat[cat_idx]))
+
+    if not os.path.exists(MODEL_STORAGE_PATH):
+        os.mkdir(MODEL_STORAGE_PATH)
+
+    # Evaluate on the test split first, then train for one epoch; checkpoint every 10 epochs.
+    for epoch in range(TRAINING_EPOCHES):
+        printout(flog, '\n<<< Testing on the test dataset ...')
+        eval_one_epoch(epoch)
+
+        printout(flog, '\n>>> Training epoch %d/%d ...' % (epoch+1, TRAINING_EPOCHES))
+
+        train_file_idx = np.arange(0, len(train_file_list))
+        np.random.shuffle(train_file_idx)
+
+        train_one_epoch(train_file_idx, epoch)
+
+        if (epoch+1) % 10 == 0:
+            cp_filename = saver.save(sess, os.path.join(MODEL_STORAGE_PATH, 'epoch_' + str(epoch+1)+'.ckpt'))
+            printout(flog, 'Successfully stored the checkpoint model to ' + cp_filename)
+
+        flog.flush()
+
+    flog.close()
+
+if __name__ == '__main__':
+    train()
-- Gitee

From b6ab2c80ef64d462216b4d3031db45a9af0f5b43 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:47:05 +0000
Subject: [PATCH 27/54] add TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt.
---
 .../PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt

diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt
new file mode 100644
index 000000000..56a6051ca
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/1.txt
@@ -0,0 +1 @@
+1
\ No newline at end of file
-- Gitee

From 176aacdf28b2c2fd1ecfa7228b9357cf758307c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 06:54:53 +0000
Subject: [PATCH 28/54] add part_seg/testing_ply_file_list.txt.
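
Each row of the list added below names one evaluation sample as three
whitespace-separated fields: the sampled point cloud
(<synset>/points/<id>.pts), its expert-verified per-point part labels
(<synset>/expert_verified/points_label/<id>.seg), and the ShapeNet synset id
of the category (e.g. 03001627 = chair, 04379243 = table). A minimal parsing
sketch under that row-layout assumption; the helper name read_ply_file_list
and the data_root argument are illustrative and not part of this repo:

    import os

    def read_ply_file_list(list_path, data_root='.'):
        """Return (pts_path, seg_path, synset_id) triples from the list file."""
        triples = []
        with open(list_path) as f:
            for line in f:
                fields = line.split()
                if len(fields) != 3:
                    continue  # skip blank or malformed rows
                pts_rel, seg_rel, synset = fields
                triples.append((os.path.join(data_root, pts_rel),
                                os.path.join(data_root, seg_rel),
                                synset))
        return triples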
--- .../part_seg/testing_ply_file_list.txt | 2874 +++++++++++++++++ 1 file changed, 2874 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt diff --git a/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt new file mode 100644 index 000000000..b1ff7f478 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/PointNet_ID2913_for_TensorFlow2.X/part_seg/testing_ply_file_list.txt @@ -0,0 +1,2874 @@ +03001627/points/355fa0f35b61fdd7aa74a6b5ee13e775.pts 03001627/expert_verified/points_label/355fa0f35b61fdd7aa74a6b5ee13e775.seg 03001627 +04379243/points/408c3db9b4ee6be2e9f3e9c758fef992.pts 04379243/expert_verified/points_label/408c3db9b4ee6be2e9f3e9c758fef992.seg 04379243 +02691156/points/a1708ad923f3b51abbf3143b1cb6076a.pts 02691156/expert_verified/points_label/a1708ad923f3b51abbf3143b1cb6076a.seg 02691156 +03001627/points/2783a969fa42cdecbe31379a5751d820.pts 03001627/expert_verified/points_label/2783a969fa42cdecbe31379a5751d820.seg 03001627 +03001627/points/ed56af61297594bf1c4300651205adf3.pts 03001627/expert_verified/points_label/ed56af61297594bf1c4300651205adf3.seg 03001627 +03001627/points/c0857de5101f704f3c5e1addd9922bf2.pts 03001627/expert_verified/points_label/c0857de5101f704f3c5e1addd9922bf2.seg 03001627 +02691156/points/b72804a8bd3dbbaca8607f540cc62ba.pts 02691156/expert_verified/points_label/b72804a8bd3dbbaca8607f540cc62ba.seg 02691156 +03001627/points/df609533cd186278398c7598b0d2e5d5.pts 03001627/expert_verified/points_label/df609533cd186278398c7598b0d2e5d5.seg 03001627 +04379243/points/c24b7a315dbf2f3178ab7c8b395efbfe.pts 04379243/expert_verified/points_label/c24b7a315dbf2f3178ab7c8b395efbfe.seg 04379243 +03636649/points/b8c87ad9d4930983a8d82fc8a3e54728.pts 03636649/expert_verified/points_label/b8c87ad9d4930983a8d82fc8a3e54728.seg 03636649 +02691156/points/8add45a11c9fcb446eb5821e78d8898a.pts 02691156/expert_verified/points_label/8add45a11c9fcb446eb5821e78d8898a.seg 02691156 +04379243/points/94d6518cf1e00eaac013a7bed5288654.pts 04379243/expert_verified/points_label/94d6518cf1e00eaac013a7bed5288654.seg 04379243 +04379243/points/1dbb8fd083f96ad279b3e1be3524f72f.pts 04379243/expert_verified/points_label/1dbb8fd083f96ad279b3e1be3524f72f.seg 04379243 +03001627/points/452115e132539be4daaaeef365d8f6e5.pts 03001627/expert_verified/points_label/452115e132539be4daaaeef365d8f6e5.seg 03001627 +04379243/points/bd25dfa62c3c2cf772bd03149507655d.pts 04379243/expert_verified/points_label/bd25dfa62c3c2cf772bd03149507655d.seg 04379243 +03948459/points/b1bbe535a833635d91f9af3df5b0c8fc.pts 03948459/expert_verified/points_label/b1bbe535a833635d91f9af3df5b0c8fc.seg 03948459 +04379243/points/d41c8af82fe98a019fb4103277a6b93.pts 04379243/expert_verified/points_label/d41c8af82fe98a019fb4103277a6b93.seg 04379243 +03001627/points/3109a0b9f9bc5fecb4cd1bd556007aed.pts 03001627/expert_verified/points_label/3109a0b9f9bc5fecb4cd1bd556007aed.seg 03001627 +03001627/points/d38129a3301d31350b1fc43ca5e85e.pts 03001627/expert_verified/points_label/d38129a3301d31350b1fc43ca5e85e.seg 03001627 +03636649/points/495af808806f1727a753b1b88fff4abb.pts 03636649/expert_verified/points_label/495af808806f1727a753b1b88fff4abb.seg 03636649 +04379243/points/4d3cc502d4444c848cbb8bac2032149c.pts 04379243/expert_verified/points_label/4d3cc502d4444c848cbb8bac2032149c.seg 04379243 
+02691156/points/ed7e1a38fe33830b87697d3904b168b.pts 02691156/expert_verified/points_label/ed7e1a38fe33830b87697d3904b168b.seg 02691156 +04379243/points/cf076ced8264a480cce90f0d61ed7a70.pts 04379243/expert_verified/points_label/cf076ced8264a480cce90f0d61ed7a70.seg 04379243 +04379243/points/c04b363fd824528bd42b9650f19dd425.pts 04379243/expert_verified/points_label/c04b363fd824528bd42b9650f19dd425.seg 04379243 +04379243/points/9705c2610980d0fdb2d0500bdfc28f70.pts 04379243/expert_verified/points_label/9705c2610980d0fdb2d0500bdfc28f70.seg 04379243 +02691156/points/de29a1335c332a5ef7bc9a344bb7bae5.pts 02691156/expert_verified/points_label/de29a1335c332a5ef7bc9a344bb7bae5.seg 02691156 +03001627/points/75d0664363f418efe461a9a9741d9415.pts 03001627/expert_verified/points_label/75d0664363f418efe461a9a9741d9415.seg 03001627 +03001627/points/3421ad5a45b85f7a4b3c42e318f3affc.pts 03001627/expert_verified/points_label/3421ad5a45b85f7a4b3c42e318f3affc.seg 03001627 +03001627/points/c67a255a26e30abb6b9f3980da0b1dff.pts 03001627/expert_verified/points_label/c67a255a26e30abb6b9f3980da0b1dff.seg 03001627 +04379243/points/6791c92944c99c029f1deb04fb8ae481.pts 04379243/expert_verified/points_label/6791c92944c99c029f1deb04fb8ae481.seg 04379243 +04379243/points/4b5536d2e9c5b9b7febad4f49b26ec52.pts 04379243/expert_verified/points_label/4b5536d2e9c5b9b7febad4f49b26ec52.seg 04379243 +04379243/points/c5fc6c1e0d446d37acce1c6e70b58979.pts 04379243/expert_verified/points_label/c5fc6c1e0d446d37acce1c6e70b58979.seg 04379243 +03001627/points/9c8d3c5779871705d22218517e73100.pts 03001627/expert_verified/points_label/9c8d3c5779871705d22218517e73100.seg 03001627 +04379243/points/4f70d14dc276a9539a83764a2641fc5c.pts 04379243/expert_verified/points_label/4f70d14dc276a9539a83764a2641fc5c.seg 04379243 +04379243/points/9d8f0444a8c09adff0d4c8f4dd125299.pts 04379243/expert_verified/points_label/9d8f0444a8c09adff0d4c8f4dd125299.seg 04379243 +04379243/points/57fbb082f660c4f7716b680dedf77108.pts 04379243/expert_verified/points_label/57fbb082f660c4f7716b680dedf77108.seg 04379243 +02958343/points/cb19594e73992a3d51008e496c6cfd2e.pts 02958343/expert_verified/points_label/cb19594e73992a3d51008e496c6cfd2e.seg 02958343 +03624134/points/9d424831d05d363d870906b5178d97bd.pts 03624134/expert_verified/points_label/9d424831d05d363d870906b5178d97bd.seg 03624134 +03001627/points/b884ff155c4117a7508dd48e67ad44bc.pts 03001627/expert_verified/points_label/b884ff155c4117a7508dd48e67ad44bc.seg 03001627 +02958343/points/7a5eba46ba4cfac35aa429db266f0c30.pts 02958343/expert_verified/points_label/7a5eba46ba4cfac35aa429db266f0c30.seg 02958343 +02691156/points/4def53f149137451b0009f08a96f38a9.pts 02691156/expert_verified/points_label/4def53f149137451b0009f08a96f38a9.seg 02691156 +03001627/points/fa8f7c225d3b9f1def4a09e7eb872bd9.pts 03001627/expert_verified/points_label/fa8f7c225d3b9f1def4a09e7eb872bd9.seg 03001627 +04225987/points/f5d7698b5a57d61226e0640b67de606.pts 04225987/expert_verified/points_label/f5d7698b5a57d61226e0640b67de606.seg 04225987 +03001627/points/9aece6c6436cde6fd9ac1bf1eddffd24.pts 03001627/expert_verified/points_label/9aece6c6436cde6fd9ac1bf1eddffd24.seg 03001627 +04099429/points/15474cf9caa757a528eba1f0b7744e9.pts 04099429/expert_verified/points_label/15474cf9caa757a528eba1f0b7744e9.seg 04099429 +02691156/points/571cfb1da3d5b3704b5910188444efc8.pts 02691156/expert_verified/points_label/571cfb1da3d5b3704b5910188444efc8.seg 02691156 +03636649/points/5d97be0e2414bfe0a8930422448288ea.pts 
03636649/expert_verified/points_label/5d97be0e2414bfe0a8930422448288ea.seg 03636649 +02958343/points/648ceaad362345518a6cf8c6b92417f2.pts 02958343/expert_verified/points_label/648ceaad362345518a6cf8c6b92417f2.seg 02958343 +03001627/points/8a845bb67ee8486d6199d6fe090be061.pts 03001627/expert_verified/points_label/8a845bb67ee8486d6199d6fe090be061.seg 03001627 +04379243/points/3645a90e02d16f0584aa8fa8b66ba302.pts 04379243/expert_verified/points_label/3645a90e02d16f0584aa8fa8b66ba302.seg 04379243 +04379243/points/ecf3d40b14300d3c0c26b04b6b8e17a.pts 04379243/expert_verified/points_label/ecf3d40b14300d3c0c26b04b6b8e17a.seg 04379243 +04379243/points/a860e5edcaec268e615bcf72f8385966.pts 04379243/expert_verified/points_label/a860e5edcaec268e615bcf72f8385966.seg 04379243 +03001627/points/5edfec789343e0c3319f1c1eee46f332.pts 03001627/expert_verified/points_label/5edfec789343e0c3319f1c1eee46f332.seg 03001627 +02691156/points/92fb0d6a866fe7aca8607f540cc62ba.pts 02691156/expert_verified/points_label/92fb0d6a866fe7aca8607f540cc62ba.seg 02691156 +02958343/points/e4886a4d0c6ea960fe21694bd5f519d1.pts 02958343/expert_verified/points_label/e4886a4d0c6ea960fe21694bd5f519d1.seg 02958343 +03636649/points/e3ee6b31e54e95b7d42b9650f19dd425.pts 03636649/expert_verified/points_label/e3ee6b31e54e95b7d42b9650f19dd425.seg 03636649 +03467517/points/d546e034a6c659a425cd348738a8052a.pts 03467517/expert_verified/points_label/d546e034a6c659a425cd348738a8052a.seg 03467517 +03001627/points/26a6ce644504c5fa22963ea1e168015d.pts 03001627/expert_verified/points_label/26a6ce644504c5fa22963ea1e168015d.seg 03001627 +02691156/points/b2b1c1d5c757af8a7209009cfb89d4bd.pts 02691156/expert_verified/points_label/b2b1c1d5c757af8a7209009cfb89d4bd.seg 02691156 +03467517/points/4bd2492d56d6b8c537b5646da91e9ed0.pts 03467517/expert_verified/points_label/4bd2492d56d6b8c537b5646da91e9ed0.seg 03467517 +04379243/points/92ed9344484dd026dfd21203bf8b4b46.pts 04379243/expert_verified/points_label/92ed9344484dd026dfd21203bf8b4b46.seg 04379243 +04379243/points/2d1d8a2f976387bd3145205f02ff9fc5.pts 04379243/expert_verified/points_label/2d1d8a2f976387bd3145205f02ff9fc5.seg 04379243 +03467517/points/5b7fcd85ce6fd1931377689fa4e4b2d6.pts 03467517/expert_verified/points_label/5b7fcd85ce6fd1931377689fa4e4b2d6.seg 03467517 +02691156/points/4cee36a2e8dd3b24b87697d3904b168b.pts 02691156/expert_verified/points_label/4cee36a2e8dd3b24b87697d3904b168b.seg 02691156 +03001627/points/f23c1bb951fa8909bc01640b1b5116e7.pts 03001627/expert_verified/points_label/f23c1bb951fa8909bc01640b1b5116e7.seg 03001627 +04379243/points/370b45eeeb9b11416f04d49e4de95b59.pts 04379243/expert_verified/points_label/370b45eeeb9b11416f04d49e4de95b59.seg 04379243 +03001627/points/3885255ca5d75e69da2260dc4a1fc2c6.pts 03001627/expert_verified/points_label/3885255ca5d75e69da2260dc4a1fc2c6.seg 03001627 +02691156/points/452c18f8997c53741adbb4c4e06ad649.pts 02691156/expert_verified/points_label/452c18f8997c53741adbb4c4e06ad649.seg 02691156 +03001627/points/8b39b501c9fa4d349b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/8b39b501c9fa4d349b9f2eb77f5e247e.seg 03001627 +04379243/points/94966aa8a7a6f540f6807434c358ea12.pts 04379243/expert_verified/points_label/94966aa8a7a6f540f6807434c358ea12.seg 04379243 +03001627/points/9b6f17ce2db29c4c9ae35d137ece64f9.pts 03001627/expert_verified/points_label/9b6f17ce2db29c4c9ae35d137ece64f9.seg 03001627 +03467517/points/85bef84a26a91bff9ce363b13bdd195d.pts 03467517/expert_verified/points_label/85bef84a26a91bff9ce363b13bdd195d.seg 03467517 
+03624134/points/e98bc872371c852e15b040d25222e627.pts 03624134/expert_verified/points_label/e98bc872371c852e15b040d25222e627.seg 03624134 +04379243/points/5dff67091a2f7ef1ab988fe471b1bd06.pts 04379243/expert_verified/points_label/5dff67091a2f7ef1ab988fe471b1bd06.seg 04379243 +03001627/points/e6f37dff25ec4ca4f815ebdb2df45512.pts 03001627/expert_verified/points_label/e6f37dff25ec4ca4f815ebdb2df45512.seg 03001627 +02691156/points/85a15c26a6e9921ae008cc4902bfe3cd.pts 02691156/expert_verified/points_label/85a15c26a6e9921ae008cc4902bfe3cd.seg 02691156 +03001627/points/94371ddd6d62f7b762ec387b772e9e1.pts 03001627/expert_verified/points_label/94371ddd6d62f7b762ec387b772e9e1.seg 03001627 +02691156/points/4374a3b4b98e247b398db3ebdf468ed7.pts 02691156/expert_verified/points_label/4374a3b4b98e247b398db3ebdf468ed7.seg 02691156 +03948459/points/8fa02aab7237289667fdfbdf64f19325.pts 03948459/expert_verified/points_label/8fa02aab7237289667fdfbdf64f19325.seg 03948459 +04379243/points/9f1fcee83cacf964f4b6538438a0b930.pts 04379243/expert_verified/points_label/9f1fcee83cacf964f4b6538438a0b930.seg 04379243 +04225987/points/f5643778dbcd653655a834a7aafb0236.pts 04225987/expert_verified/points_label/f5643778dbcd653655a834a7aafb0236.seg 04225987 +03636649/points/cdbe11124dbf418167ac0fa90111fad0.pts 03636649/expert_verified/points_label/cdbe11124dbf418167ac0fa90111fad0.seg 03636649 +03001627/points/e3d23dc47ddd9620c9be65dfbd21428b.pts 03001627/expert_verified/points_label/e3d23dc47ddd9620c9be65dfbd21428b.seg 03001627 +03001627/points/efd0411eaf2396c4de7ed732f5aeea4.pts 03001627/expert_verified/points_label/efd0411eaf2396c4de7ed732f5aeea4.seg 03001627 +03636649/points/7ad15667f654fc08664b3b9b23ddfcbc.pts 03636649/expert_verified/points_label/7ad15667f654fc08664b3b9b23ddfcbc.seg 03636649 +04379243/points/55d5fce641343449d42b9650f19dd425.pts 04379243/expert_verified/points_label/55d5fce641343449d42b9650f19dd425.seg 04379243 +03467517/points/a31ef3a8c70b789b93f0194265a9746c.pts 03467517/expert_verified/points_label/a31ef3a8c70b789b93f0194265a9746c.seg 03467517 +03001627/points/ccfc857f35c138ede785b88cc9024b2a.pts 03001627/expert_verified/points_label/ccfc857f35c138ede785b88cc9024b2a.seg 03001627 +02691156/points/e3fd510add7b1aa3c19eb6ab3736de88.pts 02691156/expert_verified/points_label/e3fd510add7b1aa3c19eb6ab3736de88.seg 02691156 +03636649/points/213d911cc489c352b5db3f95d706a0c9.pts 03636649/expert_verified/points_label/213d911cc489c352b5db3f95d706a0c9.seg 03636649 +04225987/points/c171d90db4c4ba56cdb1768065dafd0c.pts 04225987/expert_verified/points_label/c171d90db4c4ba56cdb1768065dafd0c.seg 04225987 +03797390/points/10f6e09036350e92b3f21f1137c3c347.pts 03797390/expert_verified/points_label/10f6e09036350e92b3f21f1137c3c347.seg 03797390 +02691156/points/a374b0448461438ef3d4cc10d9776c62.pts 02691156/expert_verified/points_label/a374b0448461438ef3d4cc10d9776c62.seg 02691156 +03001627/points/b6457a76f24de9f67aa6f8353fce2005.pts 03001627/expert_verified/points_label/b6457a76f24de9f67aa6f8353fce2005.seg 03001627 +03001627/points/7fe08cd7a9b76c1dcbde89e0c48a01bf.pts 03001627/expert_verified/points_label/7fe08cd7a9b76c1dcbde89e0c48a01bf.seg 03001627 +03001627/points/58867a00409c47c0813a1237d2827540.pts 03001627/expert_verified/points_label/58867a00409c47c0813a1237d2827540.seg 03001627 +02958343/points/65e3e2893669a09cc7b48e36e31209b9.pts 02958343/expert_verified/points_label/65e3e2893669a09cc7b48e36e31209b9.seg 02958343 +03948459/points/edec08542b9312b712b38b1d99376c0b.pts 
03948459/expert_verified/points_label/edec08542b9312b712b38b1d99376c0b.seg 03948459 +03636649/points/cd80cc92cf732e8d8a17805dbfb751e2.pts 03636649/expert_verified/points_label/cd80cc92cf732e8d8a17805dbfb751e2.seg 03636649 +03467517/points/87650e8ff3d85672381b7fbf79296afb.pts 03467517/expert_verified/points_label/87650e8ff3d85672381b7fbf79296afb.seg 03467517 +03636649/points/1e91664763d371937dd73da65dc0e6a7.pts 03636649/expert_verified/points_label/1e91664763d371937dd73da65dc0e6a7.seg 03636649 +04379243/points/104c8e90ecf0e5351ed672982b7954af.pts 04379243/expert_verified/points_label/104c8e90ecf0e5351ed672982b7954af.seg 04379243 +04379243/points/1834fac2f46a26f91933ffef19678834.pts 04379243/expert_verified/points_label/1834fac2f46a26f91933ffef19678834.seg 04379243 +04379243/points/ed0be8928caab4bdab610b0c94236463.pts 04379243/expert_verified/points_label/ed0be8928caab4bdab610b0c94236463.seg 04379243 +04379243/points/105f53a6471f3ceb4a420e3c1b966720.pts 04379243/expert_verified/points_label/105f53a6471f3ceb4a420e3c1b966720.seg 04379243 +04379243/points/7bf5f689da285153583ff8a5fc7c1869.pts 04379243/expert_verified/points_label/7bf5f689da285153583ff8a5fc7c1869.seg 04379243 +02958343/points/eface8341d001e9ceb01ae4a4788bd4f.pts 02958343/expert_verified/points_label/eface8341d001e9ceb01ae4a4788bd4f.seg 02958343 +03001627/points/517880899d26080471a782a4379556c7.pts 03001627/expert_verified/points_label/517880899d26080471a782a4379556c7.seg 03001627 +03001627/points/5ef3e4abd4386c8871bc6030acc85f1e.pts 03001627/expert_verified/points_label/5ef3e4abd4386c8871bc6030acc85f1e.seg 03001627 +03001627/points/3eb60e6679d1df1dde7eedbb2790491b.pts 03001627/expert_verified/points_label/3eb60e6679d1df1dde7eedbb2790491b.seg 03001627 +03001627/points/4702e6196503ff84f1c0e03f321d0b20.pts 03001627/expert_verified/points_label/4702e6196503ff84f1c0e03f321d0b20.seg 03001627 +02958343/points/b0a7789537663f7ba1ff2929b2f5cf19.pts 02958343/expert_verified/points_label/b0a7789537663f7ba1ff2929b2f5cf19.seg 02958343 +03636649/points/2ce7732982343c1d9792f6094a78f8d5.pts 03636649/expert_verified/points_label/2ce7732982343c1d9792f6094a78f8d5.seg 03636649 +03467517/points/78a75ce8dc8dc197dc2b574e941c815b.pts 03467517/expert_verified/points_label/78a75ce8dc8dc197dc2b574e941c815b.seg 03467517 +03636649/points/348d6ddf9e02cbddf647dc544bb0ab61.pts 03636649/expert_verified/points_label/348d6ddf9e02cbddf647dc544bb0ab61.seg 03636649 +03001627/points/e56087cd55cce8b4f41a4361d0ca9bc8.pts 03001627/expert_verified/points_label/e56087cd55cce8b4f41a4361d0ca9bc8.seg 03001627 +03642806/points/4d3dde22f529195bc887d5d9a11f3155.pts 03642806/expert_verified/points_label/4d3dde22f529195bc887d5d9a11f3155.seg 03642806 +03001627/points/78e1977bc5f0f4041552c6ecbda964b.pts 03001627/expert_verified/points_label/78e1977bc5f0f4041552c6ecbda964b.seg 03001627 +04379243/points/44360c91a7e91098d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/44360c91a7e91098d93768e7b9b1eabf.seg 04379243 +02691156/points/52ca6970fb09b561f9f7510373841dd9.pts 02691156/expert_verified/points_label/52ca6970fb09b561f9f7510373841dd9.seg 02691156 +02958343/points/383f8d508b6f25f565d21723f535417.pts 02958343/expert_verified/points_label/383f8d508b6f25f565d21723f535417.seg 02958343 +03001627/points/d6da5457b0682e24696b74614952b2d0.pts 03001627/expert_verified/points_label/d6da5457b0682e24696b74614952b2d0.seg 03001627 +02691156/points/9f5dda6f01bbe29bf810506e9ae2dcc2.pts 02691156/expert_verified/points_label/9f5dda6f01bbe29bf810506e9ae2dcc2.seg 02691156 
+03467517/points/35e77edd3ae6ad4993f0194265a9746c.pts 03467517/expert_verified/points_label/35e77edd3ae6ad4993f0194265a9746c.seg 03467517 +03001627/points/590d04438aeffbb58f447453fccbd9d3.pts 03001627/expert_verified/points_label/590d04438aeffbb58f447453fccbd9d3.seg 03001627 +03001627/points/cdfa898eadf316122056b4bd5d870b47.pts 03001627/expert_verified/points_label/cdfa898eadf316122056b4bd5d870b47.seg 03001627 +03001627/points/8e678a54f2ee4e5e492d9da2668ec34c.pts 03001627/expert_verified/points_label/8e678a54f2ee4e5e492d9da2668ec34c.seg 03001627 +04379243/points/1804dd6f5c827c1a4bf8d5f43e57b138.pts 04379243/expert_verified/points_label/1804dd6f5c827c1a4bf8d5f43e57b138.seg 04379243 +02691156/points/23eed87ac79f1b152f9c405cf0817830.pts 02691156/expert_verified/points_label/23eed87ac79f1b152f9c405cf0817830.seg 02691156 +02691156/points/97bc5fffde64178f43afdb9c81ff2967.pts 02691156/expert_verified/points_label/97bc5fffde64178f43afdb9c81ff2967.seg 02691156 +03001627/points/3b1f1913f2bc0dc171dbe96559c7bcae.pts 03001627/expert_verified/points_label/3b1f1913f2bc0dc171dbe96559c7bcae.seg 03001627 +04379243/points/82e1c0b874b0a9e035cd53a06b1d2317.pts 04379243/expert_verified/points_label/82e1c0b874b0a9e035cd53a06b1d2317.seg 04379243 +03001627/points/e0a0d5c2ba6fdca215b55266697a17be.pts 03001627/expert_verified/points_label/e0a0d5c2ba6fdca215b55266697a17be.seg 03001627 +03636649/points/9b558be5e2b60e3eb09f0ca9c143fdfd.pts 03636649/expert_verified/points_label/9b558be5e2b60e3eb09f0ca9c143fdfd.seg 03636649 +03001627/points/813be9a8485050571563f0911e3e5fc0.pts 03001627/expert_verified/points_label/813be9a8485050571563f0911e3e5fc0.seg 03001627 +02958343/points/6ca9967adcf862a461c6c61410fc904b.pts 02958343/expert_verified/points_label/6ca9967adcf862a461c6c61410fc904b.seg 02958343 +03624134/points/5663637633c938d1395331ebe4786cd.pts 03624134/expert_verified/points_label/5663637633c938d1395331ebe4786cd.seg 03624134 +03636649/points/ec8dc2311d381a9e3d39d8012919dd25.pts 03636649/expert_verified/points_label/ec8dc2311d381a9e3d39d8012919dd25.seg 03636649 +04379243/points/b685208ccf38786a6f1e07a56c129dfc.pts 04379243/expert_verified/points_label/b685208ccf38786a6f1e07a56c129dfc.seg 04379243 +03636649/points/ce621e6df1ab9ae35d2cdb96c1afe34.pts 03636649/expert_verified/points_label/ce621e6df1ab9ae35d2cdb96c1afe34.seg 03636649 +02691156/points/b092d523bdd320e4ca8607f540cc62ba.pts 02691156/expert_verified/points_label/b092d523bdd320e4ca8607f540cc62ba.seg 02691156 +04379243/points/401fe961ec7b0cb5dcfcef693e7ec696.pts 04379243/expert_verified/points_label/401fe961ec7b0cb5dcfcef693e7ec696.seg 04379243 +04225987/points/1e5fd1de723cc66cbb1ed6d4d8526a19.pts 04225987/expert_verified/points_label/1e5fd1de723cc66cbb1ed6d4d8526a19.seg 04225987 +03001627/points/b987a2ca54c6ddecb74697ced5978572.pts 03001627/expert_verified/points_label/b987a2ca54c6ddecb74697ced5978572.seg 03001627 +04379243/points/3e42e3386f4aea9277cf3bb06f394ad.pts 04379243/expert_verified/points_label/3e42e3386f4aea9277cf3bb06f394ad.seg 04379243 +02958343/points/1198255e3d20d2f323f3ca54768fe2ee.pts 02958343/expert_verified/points_label/1198255e3d20d2f323f3ca54768fe2ee.seg 02958343 +04379243/points/2b564ff0989caf58ab610b0c94236463.pts 04379243/expert_verified/points_label/2b564ff0989caf58ab610b0c94236463.seg 04379243 +03636649/points/941271c5d9b192eaccd8f9b9403fd602.pts 03636649/expert_verified/points_label/941271c5d9b192eaccd8f9b9403fd602.seg 03636649 +02691156/points/6aeae52e38f892a7e0091ae06332b2d5.pts 
02691156/expert_verified/points_label/6aeae52e38f892a7e0091ae06332b2d5.seg 02691156 +04379243/points/4cdfd605352adcb0da13974b3533fb59.pts 04379243/expert_verified/points_label/4cdfd605352adcb0da13974b3533fb59.seg 04379243 +04379243/points/7c24e4f8778e224799a5e8f6c5baa224.pts 04379243/expert_verified/points_label/7c24e4f8778e224799a5e8f6c5baa224.seg 04379243 +03001627/points/6272c21e439e0205c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/6272c21e439e0205c8687ff9b0b4e4ac.seg 03001627 +02691156/points/acd8f367c36a3d84fc7a6d75b3d807ff.pts 02691156/expert_verified/points_label/acd8f367c36a3d84fc7a6d75b3d807ff.seg 02691156 +04379243/points/d58bdda16e6bba6f796740c80be6053.pts 04379243/expert_verified/points_label/d58bdda16e6bba6f796740c80be6053.seg 04379243 +03636649/points/f97506704760741b460fa882e24b7e4a.pts 03636649/expert_verified/points_label/f97506704760741b460fa882e24b7e4a.seg 03636649 +03636649/points/9f5c3ea9f8254b8bd42b9650f19dd425.pts 03636649/expert_verified/points_label/9f5c3ea9f8254b8bd42b9650f19dd425.seg 03636649 +03797390/points/79e673336e836d1333becb3a9550cbb1.pts 03797390/expert_verified/points_label/79e673336e836d1333becb3a9550cbb1.seg 03797390 +03948459/points/2d573d37cce5b48b9f433921788191f3.pts 03948459/expert_verified/points_label/2d573d37cce5b48b9f433921788191f3.seg 03948459 +04379243/points/7aaad1c5c2be8c24a9ed7bb5b55809f8.pts 04379243/expert_verified/points_label/7aaad1c5c2be8c24a9ed7bb5b55809f8.seg 04379243 +04379243/points/c6c412c771ab0ae015a34fa27bdf3d03.pts 04379243/expert_verified/points_label/c6c412c771ab0ae015a34fa27bdf3d03.seg 04379243 +03467517/points/819251e11b46438ff6ff9bebca919581.pts 03467517/expert_verified/points_label/819251e11b46438ff6ff9bebca919581.seg 03467517 +03001627/points/51f4ea68be319fe8990e5087098e19c.pts 03001627/expert_verified/points_label/51f4ea68be319fe8990e5087098e19c.seg 03001627 +03467517/points/66b24797480ba515d57700c05b1862d8.pts 03467517/expert_verified/points_label/66b24797480ba515d57700c05b1862d8.seg 03467517 +03790512/points/9d3b07f4475d501e8249f134aca4c817.pts 03790512/expert_verified/points_label/9d3b07f4475d501e8249f134aca4c817.seg 03790512 +04379243/points/72cfb60a075369ab7252c133a7e17d94.pts 04379243/expert_verified/points_label/72cfb60a075369ab7252c133a7e17d94.seg 04379243 +04379243/points/12a2733fc5f6b31ef8574543281e850f.pts 04379243/expert_verified/points_label/12a2733fc5f6b31ef8574543281e850f.seg 04379243 +03636649/points/aed950102f1e9c7a659dda512294c744.pts 03636649/expert_verified/points_label/aed950102f1e9c7a659dda512294c744.seg 03636649 +03001627/points/3126c6e9277b775b245ac1812a4e4d0c.pts 03001627/expert_verified/points_label/3126c6e9277b775b245ac1812a4e4d0c.seg 03001627 +02958343/points/8decf42b145f98d148d2ba4615e03b21.pts 02958343/expert_verified/points_label/8decf42b145f98d148d2ba4615e03b21.seg 02958343 +03467517/points/2f9bd6e61e038d8fd4b4ae2ff4c58b57.pts 03467517/expert_verified/points_label/2f9bd6e61e038d8fd4b4ae2ff4c58b57.seg 03467517 +03467517/points/6a983b2ff1b8a42e1285d7bfa3e922e4.pts 03467517/expert_verified/points_label/6a983b2ff1b8a42e1285d7bfa3e922e4.seg 03467517 +03261776/points/e33d6e8e39a75268957b6a4f3924d982.pts 03261776/expert_verified/points_label/e33d6e8e39a75268957b6a4f3924d982.seg 03261776 +04379243/points/fe2f2b120d84ed909b896cf832106977.pts 04379243/expert_verified/points_label/fe2f2b120d84ed909b896cf832106977.seg 04379243 +02958343/points/1328a95d69cefe32f200a72c9245aee7.pts 02958343/expert_verified/points_label/1328a95d69cefe32f200a72c9245aee7.seg 02958343 
+03001627/points/58409b308683d908ca2bec46a3b47519.pts 03001627/expert_verified/points_label/58409b308683d908ca2bec46a3b47519.seg 03001627 +03001627/points/507a5070cde81fd867936ca58e67cec6.pts 03001627/expert_verified/points_label/507a5070cde81fd867936ca58e67cec6.seg 03001627 +04379243/points/ec68e1edbb7e9bc7e93cebb6ba9ca43e.pts 04379243/expert_verified/points_label/ec68e1edbb7e9bc7e93cebb6ba9ca43e.seg 04379243 +03001627/points/7facccfa81369078a8930422448288ea.pts 03001627/expert_verified/points_label/7facccfa81369078a8930422448288ea.seg 03001627 +03001627/points/be0c5a0e91c99e804e1a714ee619465a.pts 03001627/expert_verified/points_label/be0c5a0e91c99e804e1a714ee619465a.seg 03001627 +03001627/points/d73e46e07bdb3fe75fe4ecea39e8bd40.pts 03001627/expert_verified/points_label/d73e46e07bdb3fe75fe4ecea39e8bd40.seg 03001627 +03636649/points/122fb7bfa09c184ca249f8489bc060dd.pts 03636649/expert_verified/points_label/122fb7bfa09c184ca249f8489bc060dd.seg 03636649 +03001627/points/9ef3323c6ced7dfef313a0fb5fd4d79.pts 03001627/expert_verified/points_label/9ef3323c6ced7dfef313a0fb5fd4d79.seg 03001627 +02691156/points/d8452d4fe51f2bab3554ccf8c30febe7.pts 02691156/expert_verified/points_label/d8452d4fe51f2bab3554ccf8c30febe7.seg 02691156 +02691156/points/d59d75f52ac9b241ae0d772a1c85134a.pts 02691156/expert_verified/points_label/d59d75f52ac9b241ae0d772a1c85134a.seg 02691156 +02691156/points/f9e80ce23d9536623fddedb0bf24c68a.pts 02691156/expert_verified/points_label/f9e80ce23d9536623fddedb0bf24c68a.seg 02691156 +02691156/points/e69631d34410f99ac4f72bf08dc79a6.pts 02691156/expert_verified/points_label/e69631d34410f99ac4f72bf08dc79a6.seg 02691156 +04379243/points/f7196ec7d732af5166decb1b3cdc5557.pts 04379243/expert_verified/points_label/f7196ec7d732af5166decb1b3cdc5557.seg 04379243 +03261776/points/c5e47b627cb7818f17e22b7299bb7bc6.pts 03261776/expert_verified/points_label/c5e47b627cb7818f17e22b7299bb7bc6.seg 03261776 +03001627/points/5a60c649a221293d72ed554eb3baedcc.pts 03001627/expert_verified/points_label/5a60c649a221293d72ed554eb3baedcc.seg 03001627 +04379243/points/b117aac2e13630bb5d23c9bbb429abf9.pts 04379243/expert_verified/points_label/b117aac2e13630bb5d23c9bbb429abf9.seg 04379243 +03642806/points/e4c34c87ed1bc2191ef7a71d6e01357e.pts 03642806/expert_verified/points_label/e4c34c87ed1bc2191ef7a71d6e01357e.seg 03642806 +02691156/points/3fb7ceab42d7b17219ba010ddb4974fe.pts 02691156/expert_verified/points_label/3fb7ceab42d7b17219ba010ddb4974fe.seg 02691156 +04379243/points/fc472163ea149f8e19fb4103277a6b93.pts 04379243/expert_verified/points_label/fc472163ea149f8e19fb4103277a6b93.seg 04379243 +03001627/points/5ef73c9bee1b4adcd019a8a03d4a2a3.pts 03001627/expert_verified/points_label/5ef73c9bee1b4adcd019a8a03d4a2a3.seg 03001627 +02691156/points/384e72f69e6f24404cb288947cda4a2c.pts 02691156/expert_verified/points_label/384e72f69e6f24404cb288947cda4a2c.seg 02691156 +03636649/points/3fca250636e2b47a8d0fc77aab7a8d33.pts 03636649/expert_verified/points_label/3fca250636e2b47a8d0fc77aab7a8d33.seg 03636649 +04379243/points/46957ba752c3554bd42b9650f19dd425.pts 04379243/expert_verified/points_label/46957ba752c3554bd42b9650f19dd425.seg 04379243 +03001627/points/bce7ff621a5440bb34ee5c94ebdf7f1d.pts 03001627/expert_verified/points_label/bce7ff621a5440bb34ee5c94ebdf7f1d.seg 03001627 +02691156/points/66ae19841350ac2d4ba2821676102936.pts 02691156/expert_verified/points_label/66ae19841350ac2d4ba2821676102936.seg 02691156 +03001627/points/e53b07b648e8d041107a17cfae0b6df6.pts 
03001627/expert_verified/points_label/e53b07b648e8d041107a17cfae0b6df6.seg 03001627 +03624134/points/d1c757548ead4a4d8d03ca4865da5b6.pts 03624134/expert_verified/points_label/d1c757548ead4a4d8d03ca4865da5b6.seg 03624134 +04379243/points/d19b4bde0766723c9b3bb0ef2a08be04.pts 04379243/expert_verified/points_label/d19b4bde0766723c9b3bb0ef2a08be04.seg 04379243 +03001627/points/6ecec258a1b6fe2a6fee8e2140acec9.pts 03001627/expert_verified/points_label/6ecec258a1b6fe2a6fee8e2140acec9.seg 03001627 +02691156/points/ab95a4e7f2d3cf9ca8607f540cc62ba.pts 02691156/expert_verified/points_label/ab95a4e7f2d3cf9ca8607f540cc62ba.seg 02691156 +03624134/points/b61c9b5f29ad581c860a45e027159a9a.pts 03624134/expert_verified/points_label/b61c9b5f29ad581c860a45e027159a9a.seg 03624134 +03001627/points/c7da2d72f9927f1881dff5c2e57ad46e.pts 03001627/expert_verified/points_label/c7da2d72f9927f1881dff5c2e57ad46e.seg 03001627 +04379243/points/b9886dd3c4a651f3664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/b9886dd3c4a651f3664b3b9b23ddfcbc.seg 04379243 +02691156/points/abc465975af79827dfb86dddee1d6ac3.pts 02691156/expert_verified/points_label/abc465975af79827dfb86dddee1d6ac3.seg 02691156 +03636649/points/7be01530bf43f2ed8a83637b92bdc7.pts 03636649/expert_verified/points_label/7be01530bf43f2ed8a83637b92bdc7.seg 03636649 +02691156/points/b81339a2f1dbc0de9598ceb95c7f0752.pts 02691156/expert_verified/points_label/b81339a2f1dbc0de9598ceb95c7f0752.seg 02691156 +03001627/points/69709cb300ae3784ee72e5c46412e9a7.pts 03001627/expert_verified/points_label/69709cb300ae3784ee72e5c46412e9a7.seg 03001627 +03001627/points/ec25a41ca233ed096e5a467428553af2.pts 03001627/expert_verified/points_label/ec25a41ca233ed096e5a467428553af2.seg 03001627 +04379243/points/4e9394f9f64859aef4ef86403cccc399.pts 04379243/expert_verified/points_label/4e9394f9f64859aef4ef86403cccc399.seg 04379243 +04379243/points/c477235c02413bfc44d2ca62bee212a0.pts 04379243/expert_verified/points_label/c477235c02413bfc44d2ca62bee212a0.seg 04379243 +04379243/points/41b0491fdb14d41bd25ca1a27cf9bdec.pts 04379243/expert_verified/points_label/41b0491fdb14d41bd25ca1a27cf9bdec.seg 04379243 +02691156/points/59eecc0a983a27a8130cc35407fba74a.pts 02691156/expert_verified/points_label/59eecc0a983a27a8130cc35407fba74a.seg 02691156 +03467517/points/22129fab1497437cc3f912172873d52f.pts 03467517/expert_verified/points_label/22129fab1497437cc3f912172873d52f.seg 03467517 +04379243/points/6365205d2324234fc8a1efeb4b91d393.pts 04379243/expert_verified/points_label/6365205d2324234fc8a1efeb4b91d393.seg 04379243 +03001627/points/2a75b2bb82d7f77c3f9d6e0ade5188b0.pts 03001627/expert_verified/points_label/2a75b2bb82d7f77c3f9d6e0ade5188b0.seg 03001627 +03001627/points/8f226d6b3089d3b7bca860dd9b04c52c.pts 03001627/expert_verified/points_label/8f226d6b3089d3b7bca860dd9b04c52c.seg 03001627 +03624134/points/5e515b18ed17a418b056c98b2e5e5e4e.pts 03624134/expert_verified/points_label/5e515b18ed17a418b056c98b2e5e5e4e.seg 03624134 +02691156/points/5bc41589eba11a4e15477d594f1fbd99.pts 02691156/expert_verified/points_label/5bc41589eba11a4e15477d594f1fbd99.seg 02691156 +03001627/points/2bbf00f0c583fd8a4b3c42e318f3affc.pts 03001627/expert_verified/points_label/2bbf00f0c583fd8a4b3c42e318f3affc.seg 03001627 +03790512/points/9e9300a6e1caec217395d58407f193ba.pts 03790512/expert_verified/points_label/9e9300a6e1caec217395d58407f193ba.seg 03790512 +03636649/points/81894e0739e3fea9d49b2e04785f8492.pts 03636649/expert_verified/points_label/81894e0739e3fea9d49b2e04785f8492.seg 03636649 
+02958343/points/cdc8453c63ffc13e20f29d4da2b76f7a.pts 02958343/expert_verified/points_label/cdc8453c63ffc13e20f29d4da2b76f7a.seg 02958343 +04379243/points/7a0b6685a30298fb8ae8d7de284e7d2.pts 04379243/expert_verified/points_label/7a0b6685a30298fb8ae8d7de284e7d2.seg 04379243 +03001627/points/c5ee6b77f9f84adeed52100e321c9f3e.pts 03001627/expert_verified/points_label/c5ee6b77f9f84adeed52100e321c9f3e.seg 03001627 +04379243/points/4e87db85d5dab96822339a4b4aacca6b.pts 04379243/expert_verified/points_label/4e87db85d5dab96822339a4b4aacca6b.seg 04379243 +02958343/points/6dbae14e481e8fb9333e0bf0b765fa12.pts 02958343/expert_verified/points_label/6dbae14e481e8fb9333e0bf0b765fa12.seg 02958343 +03467517/points/bad8978268948ea3d3eb77b119df6d.pts 03467517/expert_verified/points_label/bad8978268948ea3d3eb77b119df6d.seg 03467517 +03001627/points/c552529c54b0612e53041c49040be3d5.pts 03001627/expert_verified/points_label/c552529c54b0612e53041c49040be3d5.seg 03001627 +02958343/points/dca8ed788347b28c171cf359a50c99bc.pts 02958343/expert_verified/points_label/dca8ed788347b28c171cf359a50c99bc.seg 02958343 +04379243/points/99720647e210078beaf288f952624966.pts 04379243/expert_verified/points_label/99720647e210078beaf288f952624966.seg 04379243 +03001627/points/b1f4b2c32f8a2fa77ee217c21e683487.pts 03001627/expert_verified/points_label/b1f4b2c32f8a2fa77ee217c21e683487.seg 03001627 +04379243/points/41cdb5b619790d5a74eb542502c2205f.pts 04379243/expert_verified/points_label/41cdb5b619790d5a74eb542502c2205f.seg 04379243 +04379243/points/a25141a07c77c25467de2aaf749e5256.pts 04379243/expert_verified/points_label/a25141a07c77c25467de2aaf749e5256.seg 04379243 +04379243/points/e9c3a3aa2278608bec15b38012222fa8.pts 04379243/expert_verified/points_label/e9c3a3aa2278608bec15b38012222fa8.seg 04379243 +03636649/points/8e025c4aa0b0201a81a172d69c52a28a.pts 03636649/expert_verified/points_label/8e025c4aa0b0201a81a172d69c52a28a.seg 03636649 +03001627/points/e175bc785390e8f6c05575120a46cd3b.pts 03001627/expert_verified/points_label/e175bc785390e8f6c05575120a46cd3b.seg 03001627 +02691156/points/ecb4ae05d7dd135a619550d2af0b6117.pts 02691156/expert_verified/points_label/ecb4ae05d7dd135a619550d2af0b6117.seg 02691156 +02691156/points/87069f21b11c180799a771d197c7b487.pts 02691156/expert_verified/points_label/87069f21b11c180799a771d197c7b487.seg 02691156 +02691156/points/ca11efc8928c10908b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/ca11efc8928c10908b96ae1a0a8b84ec.seg 02691156 +03790512/points/365c1f92a54c8cb52a45a87054fa7272.pts 03790512/expert_verified/points_label/365c1f92a54c8cb52a45a87054fa7272.seg 03790512 +03636649/points/23040992da19679aaa7cb30470f3273c.pts 03636649/expert_verified/points_label/23040992da19679aaa7cb30470f3273c.seg 03636649 +02691156/points/9441549e323552f2f001dddaf44c449b.pts 02691156/expert_verified/points_label/9441549e323552f2f001dddaf44c449b.seg 02691156 +02958343/points/17bfc66c6bc0a99d68c415156b102065.pts 02958343/expert_verified/points_label/17bfc66c6bc0a99d68c415156b102065.seg 02958343 +03001627/points/671d34c27cc0f1bf2deeb5ec76cf103b.pts 03001627/expert_verified/points_label/671d34c27cc0f1bf2deeb5ec76cf103b.seg 03001627 +03642806/points/464edfe14e9fa45c3394926146371698.pts 03642806/expert_verified/points_label/464edfe14e9fa45c3394926146371698.seg 03642806 +04379243/points/279c8601278e827dab610b0c94236463.pts 04379243/expert_verified/points_label/279c8601278e827dab610b0c94236463.seg 04379243 +04379243/points/29d9c6d84c6a126917b431cae0dd70ed.pts 
04379243/expert_verified/points_label/29d9c6d84c6a126917b431cae0dd70ed.seg 04379243 +04379243/points/5d3d902051858e56ed1397afd2317e5b.pts 04379243/expert_verified/points_label/5d3d902051858e56ed1397afd2317e5b.seg 04379243 +02958343/points/aa78d4465ae18312711f9e3a79a13dcf.pts 02958343/expert_verified/points_label/aa78d4465ae18312711f9e3a79a13dcf.seg 02958343 +03001627/points/d561ff6788ab46517b016084e2ae95e.pts 03001627/expert_verified/points_label/d561ff6788ab46517b016084e2ae95e.seg 03001627 +03001627/points/b24ed89d85b74771216fff6094e6695c.pts 03001627/expert_verified/points_label/b24ed89d85b74771216fff6094e6695c.seg 03001627 +03636649/points/f6eeb5d67c32616648fda83c10428379.pts 03636649/expert_verified/points_label/f6eeb5d67c32616648fda83c10428379.seg 03636649 +03001627/points/3b3a9f4e3aa9f2f4d39a194653571dfc.pts 03001627/expert_verified/points_label/3b3a9f4e3aa9f2f4d39a194653571dfc.seg 03001627 +03001627/points/bd0b06e158bcee8ac0d89fc15154c9a2.pts 03001627/expert_verified/points_label/bd0b06e158bcee8ac0d89fc15154c9a2.seg 03001627 +04379243/points/89251f322490e7047e38640a31d0bc3.pts 04379243/expert_verified/points_label/89251f322490e7047e38640a31d0bc3.seg 04379243 +03001627/points/935f5e58e9e15231febad4f49b26ec52.pts 03001627/expert_verified/points_label/935f5e58e9e15231febad4f49b26ec52.seg 03001627 +03467517/points/8f59fee745f1e37ea5c8e9fc8b2242fd.pts 03467517/expert_verified/points_label/8f59fee745f1e37ea5c8e9fc8b2242fd.seg 03467517 +02691156/points/fddcb2b3d45ce98e641c309f1fd7e183.pts 02691156/expert_verified/points_label/fddcb2b3d45ce98e641c309f1fd7e183.seg 02691156 +03001627/points/d915d2f1664bf76e71a70be9f12ce8b0.pts 03001627/expert_verified/points_label/d915d2f1664bf76e71a70be9f12ce8b0.seg 03001627 +02958343/points/1ae9732840a315afab2c2809513f396e.pts 02958343/expert_verified/points_label/1ae9732840a315afab2c2809513f396e.seg 02958343 +04379243/points/b658e507c84d6202610c2a68437007d6.pts 04379243/expert_verified/points_label/b658e507c84d6202610c2a68437007d6.seg 04379243 +02958343/points/707d1e19b465d075adbfb30d8d1b297e.pts 02958343/expert_verified/points_label/707d1e19b465d075adbfb30d8d1b297e.seg 02958343 +04379243/points/5b74412eba257e5182b796aa5845e185.pts 04379243/expert_verified/points_label/5b74412eba257e5182b796aa5845e185.seg 04379243 +03636649/points/a801be11157a7f243d39d8012919dd25.pts 03636649/expert_verified/points_label/a801be11157a7f243d39d8012919dd25.seg 03636649 +02691156/points/26e10058cf9835aaca8607f540cc62ba.pts 02691156/expert_verified/points_label/26e10058cf9835aaca8607f540cc62ba.seg 02691156 +03636649/points/bc704db7b62582e5d1cbf3e52b9b6237.pts 03636649/expert_verified/points_label/bc704db7b62582e5d1cbf3e52b9b6237.seg 03636649 +02691156/points/d2e2e23f5be557e2d1ab3b031c100cb1.pts 02691156/expert_verified/points_label/d2e2e23f5be557e2d1ab3b031c100cb1.seg 02691156 +03001627/points/920af478601258e24762da3a3017ade.pts 03001627/expert_verified/points_label/920af478601258e24762da3a3017ade.seg 03001627 +03001627/points/3ffd794e5100258483bc207d8a5912e3.pts 03001627/expert_verified/points_label/3ffd794e5100258483bc207d8a5912e3.seg 03001627 +04379243/points/69c536d9e450cb79436e6787c76ef3f0.pts 04379243/expert_verified/points_label/69c536d9e450cb79436e6787c76ef3f0.seg 04379243 +04379243/points/6cf6a546e2ecbffe815a7efb12912.pts 04379243/expert_verified/points_label/6cf6a546e2ecbffe815a7efb12912.seg 04379243 +03001627/points/815f436a40c28da51f56aa11cd5e0c3e.pts 03001627/expert_verified/points_label/815f436a40c28da51f56aa11cd5e0c3e.seg 03001627 
+03642806/points/4504a4d244d05ddbf5f79806bd65844f.pts 03642806/expert_verified/points_label/4504a4d244d05ddbf5f79806bd65844f.seg 03642806 +04379243/points/8ad9868947e7391113625562b56161f0.pts 04379243/expert_verified/points_label/8ad9868947e7391113625562b56161f0.seg 04379243 +03001627/points/6b9c3d42724275cf7a5c8cd74a7bc29a.pts 03001627/expert_verified/points_label/6b9c3d42724275cf7a5c8cd74a7bc29a.seg 03001627 +04379243/points/67e32538a35a5011a0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/67e32538a35a5011a0ab1d82ef09f78f.seg 04379243 +03624134/points/2743e37a65e198d51592d7a04a86fa53.pts 03624134/expert_verified/points_label/2743e37a65e198d51592d7a04a86fa53.seg 03624134 +04379243/points/12df5c215f4364b7fe388cf6c4c3705d.pts 04379243/expert_verified/points_label/12df5c215f4364b7fe388cf6c4c3705d.seg 04379243 +02958343/points/55e0897c0ac089a6da5cb3be8feeaadc.pts 02958343/expert_verified/points_label/55e0897c0ac089a6da5cb3be8feeaadc.seg 02958343 +02773838/points/4e4fcfffec161ecaed13f430b2941481.pts 02773838/expert_verified/points_label/4e4fcfffec161ecaed13f430b2941481.seg 02773838 +04379243/points/8ce70dead5119191cc3492a06e9bd850.pts 04379243/expert_verified/points_label/8ce70dead5119191cc3492a06e9bd850.seg 04379243 +02691156/points/e033b6ad34586a86cc1c9e8218bfe7fc.pts 02691156/expert_verified/points_label/e033b6ad34586a86cc1c9e8218bfe7fc.seg 02691156 +03636649/points/600b2f00113ad714e2367b9e27f16a71.pts 03636649/expert_verified/points_label/600b2f00113ad714e2367b9e27f16a71.seg 03636649 +04379243/points/a74cad1781afed87dcfcef693e7ec696.pts 04379243/expert_verified/points_label/a74cad1781afed87dcfcef693e7ec696.seg 04379243 +03001627/points/5402eecc67e489502fa77440dcb93214.pts 03001627/expert_verified/points_label/5402eecc67e489502fa77440dcb93214.seg 03001627 +03001627/points/d5bd6ea417eba6ce456cbf78e1e89022.pts 03001627/expert_verified/points_label/d5bd6ea417eba6ce456cbf78e1e89022.seg 03001627 +03001627/points/d4edd167061dac5f52a3901fa1436b1a.pts 03001627/expert_verified/points_label/d4edd167061dac5f52a3901fa1436b1a.seg 03001627 +03636649/points/9fc3ddc511f4ef62dced62abd38a02b0.pts 03636649/expert_verified/points_label/9fc3ddc511f4ef62dced62abd38a02b0.seg 03636649 +02691156/points/92a83ecaa10e8d3f78e919a72d9a39e7.pts 02691156/expert_verified/points_label/92a83ecaa10e8d3f78e919a72d9a39e7.seg 02691156 +03001627/points/fee36ec8c8ae503fc68456e8da5b9a30.pts 03001627/expert_verified/points_label/fee36ec8c8ae503fc68456e8da5b9a30.seg 03001627 +04379243/points/1df409cfefbb51658b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/1df409cfefbb51658b9b51ae4415d5aa.seg 04379243 +03001627/points/76283716a2c6586e266d673a6188bf4c.pts 03001627/expert_verified/points_label/76283716a2c6586e266d673a6188bf4c.seg 03001627 +04379243/points/29b2aaca87d19a3c5759f4335ff2e408.pts 04379243/expert_verified/points_label/29b2aaca87d19a3c5759f4335ff2e408.seg 04379243 +04379243/points/21ca4d36a0f6fa69b937d98d58545fa.pts 04379243/expert_verified/points_label/21ca4d36a0f6fa69b937d98d58545fa.seg 04379243 +02691156/points/da1acb401541235be4d2773f0358b43b.pts 02691156/expert_verified/points_label/da1acb401541235be4d2773f0358b43b.seg 02691156 +04379243/points/553c416f33c5e5e18b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/553c416f33c5e5e18b9b51ae4415d5aa.seg 04379243 +04379243/points/174832b73cd6d91c9856fa70a578baeb.pts 04379243/expert_verified/points_label/174832b73cd6d91c9856fa70a578baeb.seg 04379243 +02691156/points/1c2e9dedbcf511e616a077c4c0fc1181.pts 
02691156/expert_verified/points_label/1c2e9dedbcf511e616a077c4c0fc1181.seg 02691156 +03001627/points/893c689b192bbe33ebadcdfba7971b71.pts 03001627/expert_verified/points_label/893c689b192bbe33ebadcdfba7971b71.seg 03001627 +04379243/points/52037005fbff92d08fa35606145b47dc.pts 04379243/expert_verified/points_label/52037005fbff92d08fa35606145b47dc.seg 04379243 +04225987/points/e38a4e6fb32b51a1bebb1fbb949ea955.pts 04225987/expert_verified/points_label/e38a4e6fb32b51a1bebb1fbb949ea955.seg 04225987 +03636649/points/42bc0dce81734d892610e2a20d7c4b61.pts 03636649/expert_verified/points_label/42bc0dce81734d892610e2a20d7c4b61.seg 03636649 +04379243/points/cb7ebc943b1b424988386fe1512ed26f.pts 04379243/expert_verified/points_label/cb7ebc943b1b424988386fe1512ed26f.seg 04379243 +03624134/points/2d6e9b23e171760c3e332fb3cb6ebe50.pts 03624134/expert_verified/points_label/2d6e9b23e171760c3e332fb3cb6ebe50.seg 03624134 +04379243/points/d05ff7b47febe58a656db3f863b4b796.pts 04379243/expert_verified/points_label/d05ff7b47febe58a656db3f863b4b796.seg 04379243 +03636649/points/e178ab3b967c7fddc901d9dddb735c9f.pts 03636649/expert_verified/points_label/e178ab3b967c7fddc901d9dddb735c9f.seg 03636649 +04379243/points/527b2d1e964f056383be1aa5a5ab0c80.pts 04379243/expert_verified/points_label/527b2d1e964f056383be1aa5a5ab0c80.seg 04379243 +03001627/points/f1a1bb6ad29d703078d928ba1c4a6f75.pts 03001627/expert_verified/points_label/f1a1bb6ad29d703078d928ba1c4a6f75.seg 03001627 +04379243/points/ed9dc0937009dc031311158f08f2982a.pts 04379243/expert_verified/points_label/ed9dc0937009dc031311158f08f2982a.seg 04379243 +02691156/points/e41c5719ad09055f1b880c747ee1f83.pts 02691156/expert_verified/points_label/e41c5719ad09055f1b880c747ee1f83.seg 02691156 +04379243/points/34bbe284f7499df071a782a4379556c7.pts 04379243/expert_verified/points_label/34bbe284f7499df071a782a4379556c7.seg 04379243 +02691156/points/973df01cea43c7f690b1d6deb98feec6.pts 02691156/expert_verified/points_label/973df01cea43c7f690b1d6deb98feec6.seg 02691156 +03001627/points/ed97d1c954fca49851ceffe90913a32.pts 03001627/expert_verified/points_label/ed97d1c954fca49851ceffe90913a32.seg 03001627 +03001627/points/3a74e3d5172ee94fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/3a74e3d5172ee94fdef1c01cbd4ae0c.seg 03001627 +04379243/points/194b279c7e892a2d15fa8082e5524f79.pts 04379243/expert_verified/points_label/194b279c7e892a2d15fa8082e5524f79.seg 04379243 +04379243/points/23ece3bf871619366ff454af1e8947f3.pts 04379243/expert_verified/points_label/23ece3bf871619366ff454af1e8947f3.seg 04379243 +02691156/points/7de379891610f5feaf7dd1bfd65143a9.pts 02691156/expert_verified/points_label/7de379891610f5feaf7dd1bfd65143a9.seg 02691156 +04379243/points/54ba7e77a2bf5fe3158b7df020486ff2.pts 04379243/expert_verified/points_label/54ba7e77a2bf5fe3158b7df020486ff2.seg 04379243 +03001627/points/39825fb4341ebd1ccb002c1e2b5fc68b.pts 03001627/expert_verified/points_label/39825fb4341ebd1ccb002c1e2b5fc68b.seg 03001627 +03001627/points/a32febea4a0ac30171a782a4379556c7.pts 03001627/expert_verified/points_label/a32febea4a0ac30171a782a4379556c7.seg 03001627 +02691156/points/b9ba988dd9a6cf426e8b6dd39a855b69.pts 02691156/expert_verified/points_label/b9ba988dd9a6cf426e8b6dd39a855b69.seg 02691156 +02691156/points/37b1f7f02c4b87dbca8607f540cc62ba.pts 02691156/expert_verified/points_label/37b1f7f02c4b87dbca8607f540cc62ba.seg 02691156 +04379243/points/8ce538a671c6e684d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/8ce538a671c6e684d93768e7b9b1eabf.seg 04379243 
+04225987/points/48bf45bffab55d7cf14c37b285d25cdf.pts 04225987/expert_verified/points_label/48bf45bffab55d7cf14c37b285d25cdf.seg 04225987 +02691156/points/820ba20e5da8325f19ba010ddb4974fe.pts 02691156/expert_verified/points_label/820ba20e5da8325f19ba010ddb4974fe.seg 02691156 +02691156/points/ff52c059efaca3c1ca8607f540cc62ba.pts 02691156/expert_verified/points_label/ff52c059efaca3c1ca8607f540cc62ba.seg 02691156 +04379243/points/99737ff619cae25d6effbd64ad6b71b8.pts 04379243/expert_verified/points_label/99737ff619cae25d6effbd64ad6b71b8.seg 04379243 +04379243/points/e3b7fbed310c2c397c8d78b9aede742.pts 04379243/expert_verified/points_label/e3b7fbed310c2c397c8d78b9aede742.seg 04379243 +03001627/points/e8eedd37cb054e37b59d74a7c956bd18.pts 03001627/expert_verified/points_label/e8eedd37cb054e37b59d74a7c956bd18.seg 03001627 +03790512/points/8134a965cc0b134bb37378f3c85478b4.pts 03790512/expert_verified/points_label/8134a965cc0b134bb37378f3c85478b4.seg 03790512 +03636649/points/da5f13f4048dbd72fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/da5f13f4048dbd72fcb8d8c6d4df8143.seg 03636649 +03001627/points/f5d8dd0309401ebac47a35332c17cce2.pts 03001627/expert_verified/points_label/f5d8dd0309401ebac47a35332c17cce2.seg 03001627 +02691156/points/521eab9363fdc2a07209009cfb89d4bd.pts 02691156/expert_verified/points_label/521eab9363fdc2a07209009cfb89d4bd.seg 02691156 +03636649/points/b1e552b454366a9d7787152e5befb05b.pts 03636649/expert_verified/points_label/b1e552b454366a9d7787152e5befb05b.seg 03636649 +02958343/points/8590a6c8270375e34b5a812ecf553410.pts 02958343/expert_verified/points_label/8590a6c8270375e34b5a812ecf553410.seg 02958343 +04379243/points/d46537f513283d6cdcfcef693e7ec696.pts 04379243/expert_verified/points_label/d46537f513283d6cdcfcef693e7ec696.seg 04379243 +03001627/points/60a5795c905f3bb157f5033576317e1.pts 03001627/expert_verified/points_label/60a5795c905f3bb157f5033576317e1.seg 03001627 +02691156/points/8996445c6d2407c0fb5c1b0f759e2bc1.pts 02691156/expert_verified/points_label/8996445c6d2407c0fb5c1b0f759e2bc1.seg 02691156 +03624134/points/5e15d63317014f30ceea8802f71596b5.pts 03624134/expert_verified/points_label/5e15d63317014f30ceea8802f71596b5.seg 03624134 +03642806/points/9d48ab8c41174e60888cad7f6c0e6001.pts 03642806/expert_verified/points_label/9d48ab8c41174e60888cad7f6c0e6001.seg 03642806 +04379243/points/4cd35d6ec155d39633207e4c3ac155a4.pts 04379243/expert_verified/points_label/4cd35d6ec155d39633207e4c3ac155a4.seg 04379243 +04379243/points/884d2cc0d3aa8a72640e544a5d67c33a.pts 04379243/expert_verified/points_label/884d2cc0d3aa8a72640e544a5d67c33a.seg 04379243 +03001627/points/8191bad981637a71b356ab8b24c147.pts 03001627/expert_verified/points_label/8191bad981637a71b356ab8b24c147.seg 03001627 +03261776/points/de3b9b253e8f1aaf8b15c58b209760b5.pts 03261776/expert_verified/points_label/de3b9b253e8f1aaf8b15c58b209760b5.seg 03261776 +03636649/points/5b744ac897fe8bc557f40ff86fe708ff.pts 03636649/expert_verified/points_label/5b744ac897fe8bc557f40ff86fe708ff.seg 03636649 +04379243/points/6cd84ff61583805c85e2af9bf984f0b5.pts 04379243/expert_verified/points_label/6cd84ff61583805c85e2af9bf984f0b5.seg 04379243 +04379243/points/e65066d6b0b83719c3bd24f986301745.pts 04379243/expert_verified/points_label/e65066d6b0b83719c3bd24f986301745.seg 04379243 +04379243/points/f3efcbd9745da90619fb4103277a6b93.pts 04379243/expert_verified/points_label/f3efcbd9745da90619fb4103277a6b93.seg 04379243 +04379243/points/8ac4d93e65b9d58d9b937d98d58545fa.pts 
04379243/expert_verified/points_label/8ac4d93e65b9d58d9b937d98d58545fa.seg 04379243 +03636649/points/b69c3a0a46b932e3d3c1fbbc2200e255.pts 03636649/expert_verified/points_label/b69c3a0a46b932e3d3c1fbbc2200e255.seg 03636649 +03636649/points/5c7965b0835a1a241de9bf5a9c22fde.pts 03636649/expert_verified/points_label/5c7965b0835a1a241de9bf5a9c22fde.seg 03636649 +03001627/points/27ea798c55699b6d2c528d33bca1ac2.pts 03001627/expert_verified/points_label/27ea798c55699b6d2c528d33bca1ac2.seg 03001627 +03467517/points/dc623742d6d1518e19959b248340fafd.pts 03467517/expert_verified/points_label/dc623742d6d1518e19959b248340fafd.seg 03467517 +03001627/points/c6cb59e7645dd14d661ff085a0f14b7.pts 03001627/expert_verified/points_label/c6cb59e7645dd14d661ff085a0f14b7.seg 03001627 +03948459/points/a3679104af613021912d826efe946a9f.pts 03948459/expert_verified/points_label/a3679104af613021912d826efe946a9f.seg 03948459 +03467517/points/b6d2d35747549a5b93f0194265a9746c.pts 03467517/expert_verified/points_label/b6d2d35747549a5b93f0194265a9746c.seg 03467517 +02691156/points/2c1fff0653854166e7a636089598229.pts 02691156/expert_verified/points_label/2c1fff0653854166e7a636089598229.seg 02691156 +04379243/points/1040cd764facf6981190e285a2cbc9c.pts 04379243/expert_verified/points_label/1040cd764facf6981190e285a2cbc9c.seg 04379243 +03001627/points/485831d92925bf03f3d7c13662c10792.pts 03001627/expert_verified/points_label/485831d92925bf03f3d7c13662c10792.seg 03001627 +03636649/points/284986b4c72d624abd73284bc3c3cbac.pts 03636649/expert_verified/points_label/284986b4c72d624abd73284bc3c3cbac.seg 03636649 +02691156/points/4c008f39378be18bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/4c008f39378be18bc0909d98a1ff2b4.seg 02691156 +04379243/points/9611888ee0db1ecaf7d4d3ced798ad90.pts 04379243/expert_verified/points_label/9611888ee0db1ecaf7d4d3ced798ad90.seg 04379243 +03467517/points/12e30808350dd945f4b498e11fb60a4b.pts 03467517/expert_verified/points_label/12e30808350dd945f4b498e11fb60a4b.seg 03467517 +03467517/points/3243edb05f5e8803ac61a2f8346a8f.pts 03467517/expert_verified/points_label/3243edb05f5e8803ac61a2f8346a8f.seg 03467517 +04379243/points/ec4675f62f6946118cbb8bac2032149c.pts 04379243/expert_verified/points_label/ec4675f62f6946118cbb8bac2032149c.seg 04379243 +04379243/points/eb00a4e8b33d257cad16260d4d73b56.pts 04379243/expert_verified/points_label/eb00a4e8b33d257cad16260d4d73b56.seg 04379243 +03001627/points/5607b02869c1f8a019fb4103277a6b93.pts 03001627/expert_verified/points_label/5607b02869c1f8a019fb4103277a6b93.seg 03001627 +03636649/points/d456beea1501f278f70220cd6be776f7.pts 03636649/expert_verified/points_label/d456beea1501f278f70220cd6be776f7.seg 03636649 +02691156/points/3feeb5f8ecbfcb4ba8f0518e94fcfb22.pts 02691156/expert_verified/points_label/3feeb5f8ecbfcb4ba8f0518e94fcfb22.seg 02691156 +04379243/points/fe130356df1977499c2a886f3b75f1ff.pts 04379243/expert_verified/points_label/fe130356df1977499c2a886f3b75f1ff.seg 04379243 +02958343/points/aa7f127bb8cd9db73755eb267a6f3b6b.pts 02958343/expert_verified/points_label/aa7f127bb8cd9db73755eb267a6f3b6b.seg 02958343 +04379243/points/84a3c87bba5a472af51f77a6d7299806.pts 04379243/expert_verified/points_label/84a3c87bba5a472af51f77a6d7299806.seg 04379243 +04099429/points/2de8ee55ff69502863098049d14fe32f.pts 04099429/expert_verified/points_label/2de8ee55ff69502863098049d14fe32f.seg 04099429 +03624134/points/539ff9b2a7a0329e759e4c424bcdaafe.pts 03624134/expert_verified/points_label/539ff9b2a7a0329e759e4c424bcdaafe.seg 03624134 
+03948459/points/f3f6678898938575575e33965575974.pts 03948459/expert_verified/points_label/f3f6678898938575575e33965575974.seg 03948459 +04379243/points/c26dfd3453d81bf7788eb1f5e7ba6e7b.pts 04379243/expert_verified/points_label/c26dfd3453d81bf7788eb1f5e7ba6e7b.seg 04379243 +03001627/points/8117c55b8bbdbbc54c5c5c89015f1980.pts 03001627/expert_verified/points_label/8117c55b8bbdbbc54c5c5c89015f1980.seg 03001627 +03624134/points/40ccb8ac250e0ea5880595487ba7a30b.pts 03624134/expert_verified/points_label/40ccb8ac250e0ea5880595487ba7a30b.seg 03624134 +04379243/points/a0d2754011acdcc9d8a0e410093d6619.pts 04379243/expert_verified/points_label/a0d2754011acdcc9d8a0e410093d6619.seg 04379243 +03790512/points/5bd41c7d3e158ac93ff4d2f5a7608a24.pts 03790512/expert_verified/points_label/5bd41c7d3e158ac93ff4d2f5a7608a24.seg 03790512 +04379243/points/8f440a7c0e2af79f3ed0ffd59feeec00.pts 04379243/expert_verified/points_label/8f440a7c0e2af79f3ed0ffd59feeec00.seg 04379243 +03001627/points/734ac9809aada180d18df440db206fb1.pts 03001627/expert_verified/points_label/734ac9809aada180d18df440db206fb1.seg 03001627 +03001627/points/54f33a7cb3621d5ced98cca8f0ccd5f7.pts 03001627/expert_verified/points_label/54f33a7cb3621d5ced98cca8f0ccd5f7.seg 03001627 +03001627/points/d274fc14092387c1e17e1cb731e2fa4f.pts 03001627/expert_verified/points_label/d274fc14092387c1e17e1cb731e2fa4f.seg 03001627 +03636649/points/6ccb43088eda061dbfc838749f053cf9.pts 03636649/expert_verified/points_label/6ccb43088eda061dbfc838749f053cf9.seg 03636649 +02773838/points/1b9ef45fefefa35ed13f430b2941481.pts 02773838/expert_verified/points_label/1b9ef45fefefa35ed13f430b2941481.seg 02773838 +03001627/points/35053caa62eea36c116cc4e115d5fd2.pts 03001627/expert_verified/points_label/35053caa62eea36c116cc4e115d5fd2.seg 03001627 +04379243/points/b893c20bfb5d718371a782a4379556c7.pts 04379243/expert_verified/points_label/b893c20bfb5d718371a782a4379556c7.seg 04379243 +04379243/points/1a5062241d7903076f88aa1b7f7cc6c6.pts 04379243/expert_verified/points_label/1a5062241d7903076f88aa1b7f7cc6c6.seg 04379243 +02958343/points/add26d8f4f91ba04c84b95bddf75b22d.pts 02958343/expert_verified/points_label/add26d8f4f91ba04c84b95bddf75b22d.seg 02958343 +03636649/points/f85f26c5a807b22312bea13341a54c3f.pts 03636649/expert_verified/points_label/f85f26c5a807b22312bea13341a54c3f.seg 03636649 +03001627/points/8a232028c2b2cfad43649af30eba8304.pts 03001627/expert_verified/points_label/8a232028c2b2cfad43649af30eba8304.seg 03001627 +03636649/points/3a5a0f4c78e17b284f0c4075db76b7c.pts 03636649/expert_verified/points_label/3a5a0f4c78e17b284f0c4075db76b7c.seg 03636649 +04379243/points/df811f7a858750875634c21965ee6bab.pts 04379243/expert_verified/points_label/df811f7a858750875634c21965ee6bab.seg 04379243 +02691156/points/48706d323b9041d5438a95791ca4064d.pts 02691156/expert_verified/points_label/48706d323b9041d5438a95791ca4064d.seg 02691156 +03790512/points/170cfc531a4fd09fe6905ba5363784c3.pts 03790512/expert_verified/points_label/170cfc531a4fd09fe6905ba5363784c3.seg 03790512 +03467517/points/d4b2ddb52e8dcd3593f0194265a9746c.pts 03467517/expert_verified/points_label/d4b2ddb52e8dcd3593f0194265a9746c.seg 03467517 +03636649/points/2af78c0b040634e5881cd5e2fd8f0f3b.pts 03636649/expert_verified/points_label/2af78c0b040634e5881cd5e2fd8f0f3b.seg 03636649 +04379243/points/90cd6a48cf2789a9b430d97a45d5824.pts 04379243/expert_verified/points_label/90cd6a48cf2789a9b430d97a45d5824.seg 04379243 +03001627/points/43290694390ad1adfc735c9ceab0161a.pts 
03001627/expert_verified/points_label/43290694390ad1adfc735c9ceab0161a.seg 03001627 +03636649/points/ed57181b9e7644a3f51f77a6d7299806.pts 03636649/expert_verified/points_label/ed57181b9e7644a3f51f77a6d7299806.seg 03636649 +03261776/points/a9661a8bb610d902957b6a4f3924d982.pts 03261776/expert_verified/points_label/a9661a8bb610d902957b6a4f3924d982.seg 03261776 +02691156/points/b31bbc50a0d3a4366cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/b31bbc50a0d3a4366cf1b4a8fc3914e.seg 02691156 +03001627/points/cd5ad4afabaed0d3e762624dc3c8fa2a.pts 03001627/expert_verified/points_label/cd5ad4afabaed0d3e762624dc3c8fa2a.seg 03001627 +02958343/points/d2e1dc21db9b45df6436916a86a90ed7.pts 02958343/expert_verified/points_label/d2e1dc21db9b45df6436916a86a90ed7.seg 02958343 +02691156/points/de9e093bb17848c3b2bd4a92202f8700.pts 02691156/expert_verified/points_label/de9e093bb17848c3b2bd4a92202f8700.seg 02691156 +03467517/points/40cd2cafde62ff7ca24eeca91f583600.pts 03467517/expert_verified/points_label/40cd2cafde62ff7ca24eeca91f583600.seg 03467517 +02958343/points/56e0fef0632aed0f1d27be7764701cfe.pts 02958343/expert_verified/points_label/56e0fef0632aed0f1d27be7764701cfe.seg 02958343 +04379243/points/a4d149a48607de3d92f4c88fd91c6b1b.pts 04379243/expert_verified/points_label/a4d149a48607de3d92f4c88fd91c6b1b.seg 04379243 +03636649/points/45f11cb4099c9c87bbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/45f11cb4099c9c87bbc7a6acbd8f058b.seg 03636649 +04379243/points/3558aeeb9698722acf19858fd1963d10.pts 04379243/expert_verified/points_label/3558aeeb9698722acf19858fd1963d10.seg 04379243 +03636649/points/2a52bd01472ec7e1589ec67c01f5c1a7.pts 03636649/expert_verified/points_label/2a52bd01472ec7e1589ec67c01f5c1a7.seg 03636649 +03467517/points/58bb21c325f021088f01c8e793a6e062.pts 03467517/expert_verified/points_label/58bb21c325f021088f01c8e793a6e062.seg 03467517 +04379243/points/3997cdee934a9b238eb3bc6c6d15f9bf.pts 04379243/expert_verified/points_label/3997cdee934a9b238eb3bc6c6d15f9bf.seg 04379243 +03001627/points/c4cab2a416a4537e2871cc0b3cc1a485.pts 03001627/expert_verified/points_label/c4cab2a416a4537e2871cc0b3cc1a485.seg 03001627 +04379243/points/6aaa78b81528f4846674ff79eed6185a.pts 04379243/expert_verified/points_label/6aaa78b81528f4846674ff79eed6185a.seg 04379243 +03636649/points/fd5f6ab819910a66dc7f95a5a82e36f7.pts 03636649/expert_verified/points_label/fd5f6ab819910a66dc7f95a5a82e36f7.seg 03636649 +04379243/points/8e3303cae6cc104bad4f8ccb153c24e.pts 04379243/expert_verified/points_label/8e3303cae6cc104bad4f8ccb153c24e.seg 04379243 +03001627/points/2f0318b23d899a84493f17f4fe9b9eb2.pts 03001627/expert_verified/points_label/2f0318b23d899a84493f17f4fe9b9eb2.seg 03001627 +04379243/points/2406cdcd4c60c84132884c4c87a2e061.pts 04379243/expert_verified/points_label/2406cdcd4c60c84132884c4c87a2e061.seg 04379243 +03790512/points/55caf44a43f2c04d468bac13e007a6e9.pts 03790512/expert_verified/points_label/55caf44a43f2c04d468bac13e007a6e9.seg 03790512 +03001627/points/ee665ce6679ac8cfb502ac2eb9128f9a.pts 03001627/expert_verified/points_label/ee665ce6679ac8cfb502ac2eb9128f9a.seg 03001627 +02691156/points/32edb6ba5788dc12d8ff6111270336a9.pts 02691156/expert_verified/points_label/32edb6ba5788dc12d8ff6111270336a9.seg 02691156 +03636649/points/d0fde1daedab10365240248232b90795.pts 03636649/expert_verified/points_label/d0fde1daedab10365240248232b90795.seg 03636649 +04379243/points/61b88b501933ebae8f7068c66465c4d6.pts 04379243/expert_verified/points_label/61b88b501933ebae8f7068c66465c4d6.seg 04379243 
+03001627/points/93556cf01e19f638bf80985a99195eb8.pts 03001627/expert_verified/points_label/93556cf01e19f638bf80985a99195eb8.seg 03001627 +04379243/points/f3b8c91c5dd1cb6b8722573b29f0d6d8.pts 04379243/expert_verified/points_label/f3b8c91c5dd1cb6b8722573b29f0d6d8.seg 04379243 +04379243/points/eae36b396f6b5f97664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/eae36b396f6b5f97664b3b9b23ddfcbc.seg 04379243 +03624134/points/8bd5c4f395695ebdf40d02cc9d84a93a.pts 03624134/expert_verified/points_label/8bd5c4f395695ebdf40d02cc9d84a93a.seg 03624134 +03001627/points/8c81ff18e04584547f409062bafc8e2.pts 03001627/expert_verified/points_label/8c81ff18e04584547f409062bafc8e2.seg 03001627 +03001627/points/77e7660d71c6f3befebad4f49b26ec52.pts 03001627/expert_verified/points_label/77e7660d71c6f3befebad4f49b26ec52.seg 03001627 +03261776/points/bc404e52bfcd2038538cf6df9faa9b65.pts 03261776/expert_verified/points_label/bc404e52bfcd2038538cf6df9faa9b65.seg 03261776 +03001627/points/f09af71bebd4bea8a2651abaf391628e.pts 03001627/expert_verified/points_label/f09af71bebd4bea8a2651abaf391628e.seg 03001627 +03001627/points/8c8efbe62a1547942b90a0fb76278f6f.pts 03001627/expert_verified/points_label/8c8efbe62a1547942b90a0fb76278f6f.seg 03001627 +04379243/points/aed5697ff59e3d3035478a6869a3602d.pts 04379243/expert_verified/points_label/aed5697ff59e3d3035478a6869a3602d.seg 04379243 +02691156/points/5ac00867c7d78b1690b1d6deb98feec6.pts 02691156/expert_verified/points_label/5ac00867c7d78b1690b1d6deb98feec6.seg 02691156 +03001627/points/c709aa613431c0538a653a9f65a410f6.pts 03001627/expert_verified/points_label/c709aa613431c0538a653a9f65a410f6.seg 03001627 +03624134/points/8facbe9d9f4da233d15a5887ec2183c9.pts 03624134/expert_verified/points_label/8facbe9d9f4da233d15a5887ec2183c9.seg 03624134 +03642806/points/dbcd5a88a9d4f1d7579cfe4420588034.pts 03642806/expert_verified/points_label/dbcd5a88a9d4f1d7579cfe4420588034.seg 03642806 +03636649/points/f29a94f969dd55ffc35131da26f8061a.pts 03636649/expert_verified/points_label/f29a94f969dd55ffc35131da26f8061a.seg 03636649 +02958343/points/5e014eb2bd03daab9fbe97de4a41d527.pts 02958343/expert_verified/points_label/5e014eb2bd03daab9fbe97de4a41d527.seg 02958343 +04379243/points/7105bd044f464358beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/7105bd044f464358beedb4c8fd29e2d1.seg 04379243 +04379243/points/c827c0d4ef212f2b30cb1fe6fdc7d605.pts 04379243/expert_verified/points_label/c827c0d4ef212f2b30cb1fe6fdc7d605.seg 04379243 +04379243/points/19bc9c781df1da46824080f516909671.pts 04379243/expert_verified/points_label/19bc9c781df1da46824080f516909671.seg 04379243 +03001627/points/71b53a5f441d45b742b7e4c0136bdb7e.pts 03001627/expert_verified/points_label/71b53a5f441d45b742b7e4c0136bdb7e.seg 03001627 +02958343/points/e7e94f8dbbe8c1e9784da3853aae78cd.pts 02958343/expert_verified/points_label/e7e94f8dbbe8c1e9784da3853aae78cd.seg 02958343 +03790512/points/832c4a316c419228b37378f3c85478b4.pts 03790512/expert_verified/points_label/832c4a316c419228b37378f3c85478b4.seg 03790512 +02954340/points/c7122c44495a5ac6aceb0fa31f18f016.pts 02954340/expert_verified/points_label/c7122c44495a5ac6aceb0fa31f18f016.seg 02954340 +03001627/points/6b32d3a9198f8b03d1dcc55e36186e4e.pts 03001627/expert_verified/points_label/6b32d3a9198f8b03d1dcc55e36186e4e.seg 03001627 +03636649/points/7893d0b50a7b6a768ec45924afa4ac91.pts 03636649/expert_verified/points_label/7893d0b50a7b6a768ec45924afa4ac91.seg 03636649 +02691156/points/befcb95d80e0e49119ba010ddb4974fe.pts 
02691156/expert_verified/points_label/befcb95d80e0e49119ba010ddb4974fe.seg 02691156 +03001627/points/b70600293bab55c0593ebeeedbff73b.pts 03001627/expert_verified/points_label/b70600293bab55c0593ebeeedbff73b.seg 03001627 +02691156/points/7fedb48b457ee9f31629b98cc1b1b992.pts 02691156/expert_verified/points_label/7fedb48b457ee9f31629b98cc1b1b992.seg 02691156 +04099429/points/e04bda8655d9e606ebcdf982796b4fa.pts 04099429/expert_verified/points_label/e04bda8655d9e606ebcdf982796b4fa.seg 04099429 +04379243/points/25bcea593e4314c3436e6787c76ef3f0.pts 04379243/expert_verified/points_label/25bcea593e4314c3436e6787c76ef3f0.seg 04379243 +03636649/points/f3a9cc3060fd6b0e6e4f8fc909e0d34e.pts 03636649/expert_verified/points_label/f3a9cc3060fd6b0e6e4f8fc909e0d34e.seg 03636649 +04379243/points/516928532093f765bababe11fcea8796.pts 04379243/expert_verified/points_label/516928532093f765bababe11fcea8796.seg 04379243 +03001627/points/31569815c88e79de4458bae25a4e518a.pts 03001627/expert_verified/points_label/31569815c88e79de4458bae25a4e518a.seg 03001627 +03001627/points/a08ad49c281128ea53615647c93fc704.pts 03001627/expert_verified/points_label/a08ad49c281128ea53615647c93fc704.seg 03001627 +03642806/points/f5fc954736b06be15fd06491ae919ea3.pts 03642806/expert_verified/points_label/f5fc954736b06be15fd06491ae919ea3.seg 03642806 +04379243/points/15b495c101881d96e2367b9e27f16a71.pts 04379243/expert_verified/points_label/15b495c101881d96e2367b9e27f16a71.seg 04379243 +02691156/points/ebd991666f177f8f575bf8a4b14be4f4.pts 02691156/expert_verified/points_label/ebd991666f177f8f575bf8a4b14be4f4.seg 02691156 +02691156/points/f7739764eb1c78a053f370d353cea84.pts 02691156/expert_verified/points_label/f7739764eb1c78a053f370d353cea84.seg 02691156 +03636649/points/8a6d770e6b4942c5ef3a2c64cef919d0.pts 03636649/expert_verified/points_label/8a6d770e6b4942c5ef3a2c64cef919d0.seg 03636649 +04379243/points/2fcc875b28c5557dcfcef693e7ec696.pts 04379243/expert_verified/points_label/2fcc875b28c5557dcfcef693e7ec696.seg 04379243 +03636649/points/896abd405c79547086485c798787f66b.pts 03636649/expert_verified/points_label/896abd405c79547086485c798787f66b.seg 03636649 +02691156/points/356a633ea047c549ca8607f540cc62ba.pts 02691156/expert_verified/points_label/356a633ea047c549ca8607f540cc62ba.seg 02691156 +03001627/points/c983108db7fcfa3619fb4103277a6b93.pts 03001627/expert_verified/points_label/c983108db7fcfa3619fb4103277a6b93.seg 03001627 +04225987/points/97f85bc59f09a9f455c660e6cd8e92b.pts 04225987/expert_verified/points_label/97f85bc59f09a9f455c660e6cd8e92b.seg 04225987 +03636649/points/69a708be7245f4c9786e8e92cc08146.pts 03636649/expert_verified/points_label/69a708be7245f4c9786e8e92cc08146.seg 03636649 +04379243/points/f71296c0a7e93ec282db9fca4b68095.pts 04379243/expert_verified/points_label/f71296c0a7e93ec282db9fca4b68095.seg 04379243 +02691156/points/33faf711ed54a4d3db22b838c125a50b.pts 02691156/expert_verified/points_label/33faf711ed54a4d3db22b838c125a50b.seg 02691156 +03642806/points/5d544ee4b094c6606436916a86a90ed7.pts 03642806/expert_verified/points_label/5d544ee4b094c6606436916a86a90ed7.seg 03642806 +02691156/points/a0d63ee7fd87f93619ba010ddb4974fe.pts 02691156/expert_verified/points_label/a0d63ee7fd87f93619ba010ddb4974fe.seg 02691156 +03001627/points/e30b412be565a1026efe57da6d3d385e.pts 03001627/expert_verified/points_label/e30b412be565a1026efe57da6d3d385e.seg 03001627 +04379243/points/fe5e1df0653804d6ce4670b160b81e9.pts 04379243/expert_verified/points_label/fe5e1df0653804d6ce4670b160b81e9.seg 04379243 
+02691156/points/fd41d04f1aabbaea3fddedb0bf24c68a.pts 02691156/expert_verified/points_label/fd41d04f1aabbaea3fddedb0bf24c68a.seg 02691156 +03624134/points/e79481b2fde3a3ab340fbf70397ab69a.pts 03624134/expert_verified/points_label/e79481b2fde3a3ab340fbf70397ab69a.seg 03624134 +04379243/points/d06d27bc9ad1faabd7bf6fb68df7f786.pts 04379243/expert_verified/points_label/d06d27bc9ad1faabd7bf6fb68df7f786.seg 04379243 +03001627/points/e4931ffa06d7b05cb04cb542e2c50eb4.pts 03001627/expert_verified/points_label/e4931ffa06d7b05cb04cb542e2c50eb4.seg 03001627 +03001627/points/d4b5f8edc72b4676f4175ee3a177350a.pts 03001627/expert_verified/points_label/d4b5f8edc72b4676f4175ee3a177350a.seg 03001627 +03636649/points/4f16fffbe480b835276206fae5d3c473.pts 03636649/expert_verified/points_label/4f16fffbe480b835276206fae5d3c473.seg 03636649 +03001627/points/8ade914cd21b6e49656f29b05c68d39f.pts 03001627/expert_verified/points_label/8ade914cd21b6e49656f29b05c68d39f.seg 03001627 +03001627/points/1e304b967d5253d5dd079f8cece51712.pts 03001627/expert_verified/points_label/1e304b967d5253d5dd079f8cece51712.seg 03001627 +04379243/points/6d0ef6312f8af87a53e946fb2184f0c4.pts 04379243/expert_verified/points_label/6d0ef6312f8af87a53e946fb2184f0c4.seg 04379243 +03948459/points/79c0cac016998c7cf7ba4a82f8032357.pts 03948459/expert_verified/points_label/79c0cac016998c7cf7ba4a82f8032357.seg 03948459 +03642806/points/b51683c6285fa0f69067ac5c9d4ee692.pts 03642806/expert_verified/points_label/b51683c6285fa0f69067ac5c9d4ee692.seg 03642806 +04379243/points/93cdfd14889492dd91a4fd87fee47737.pts 04379243/expert_verified/points_label/93cdfd14889492dd91a4fd87fee47737.seg 04379243 +03636649/points/da8141b45da808199a06a7de97b096dc.pts 03636649/expert_verified/points_label/da8141b45da808199a06a7de97b096dc.seg 03636649 +04379243/points/7d22cd72bf2762b19a4b266ed4d507c9.pts 04379243/expert_verified/points_label/7d22cd72bf2762b19a4b266ed4d507c9.seg 04379243 +04225987/points/aa886bed91a13113d5498a74ca9ca78b.pts 04225987/expert_verified/points_label/aa886bed91a13113d5498a74ca9ca78b.seg 04225987 +04379243/points/55547d2fae0e3dc21705bfd3afcd10e.pts 04379243/expert_verified/points_label/55547d2fae0e3dc21705bfd3afcd10e.seg 04379243 +04379243/points/222c56ff9cddbaf4139eb23f7c8036f.pts 04379243/expert_verified/points_label/222c56ff9cddbaf4139eb23f7c8036f.seg 04379243 +03636649/points/292f1f97a543d735dedf3c967c85981a.pts 03636649/expert_verified/points_label/292f1f97a543d735dedf3c967c85981a.seg 03636649 +04379243/points/9e2318099f77d3df3527ecfeb345775f.pts 04379243/expert_verified/points_label/9e2318099f77d3df3527ecfeb345775f.seg 04379243 +04379243/points/6ace903899706a5819fb4103277a6b93.pts 04379243/expert_verified/points_label/6ace903899706a5819fb4103277a6b93.seg 04379243 +03636649/points/c080aefc6cbff8c81185ac82ed4da80d.pts 03636649/expert_verified/points_label/c080aefc6cbff8c81185ac82ed4da80d.seg 03636649 +03790512/points/9dd4ae1c34af4766b4f2746c8140d6d6.pts 03790512/expert_verified/points_label/9dd4ae1c34af4766b4f2746c8140d6d6.seg 03790512 +03001627/points/e199b1f6a70c9f56df44d20a516c07b3.pts 03001627/expert_verified/points_label/e199b1f6a70c9f56df44d20a516c07b3.seg 03001627 +04379243/points/8129d4c51abc3356bababe11fcea8796.pts 04379243/expert_verified/points_label/8129d4c51abc3356bababe11fcea8796.seg 04379243 +03001627/points/c9d8573a048c0e959c0ca344f487323e.pts 03001627/expert_verified/points_label/c9d8573a048c0e959c0ca344f487323e.seg 03001627 +04379243/points/25eefc5a3c7b30e1f103d473de33521a.pts 
04379243/expert_verified/points_label/25eefc5a3c7b30e1f103d473de33521a.seg 04379243 +03624134/points/c20cca071ea58e3ef2c542131520d62e.pts 03624134/expert_verified/points_label/c20cca071ea58e3ef2c542131520d62e.seg 03624134 +03001627/points/c86cfe147872280463626070a93463cf.pts 03001627/expert_verified/points_label/c86cfe147872280463626070a93463cf.seg 03001627 +03001627/points/3853339519aca1bdfcd4910413c446d9.pts 03001627/expert_verified/points_label/3853339519aca1bdfcd4910413c446d9.seg 03001627 +03001627/points/8cb44a50906b827615e7ec87bf4cc5ab.pts 03001627/expert_verified/points_label/8cb44a50906b827615e7ec87bf4cc5ab.seg 03001627 +02691156/points/fd9f1cdaa381599bca8607f540cc62ba.pts 02691156/expert_verified/points_label/fd9f1cdaa381599bca8607f540cc62ba.seg 02691156 +03001627/points/80dabf9ddbdc92f681806e3880250dff.pts 03001627/expert_verified/points_label/80dabf9ddbdc92f681806e3880250dff.seg 03001627 +04379243/points/5919dea71f3bcb071d54ab02e78bef2.pts 04379243/expert_verified/points_label/5919dea71f3bcb071d54ab02e78bef2.seg 04379243 +03636649/points/292ba732e002629e68c2f5eb1dd4dfaa.pts 03636649/expert_verified/points_label/292ba732e002629e68c2f5eb1dd4dfaa.seg 03636649 +04379243/points/5d77e8f6ad3741a0c30ab36bf7b0552.pts 04379243/expert_verified/points_label/5d77e8f6ad3741a0c30ab36bf7b0552.seg 04379243 +03467517/points/21a517abc4729e6e352e5d4d2615db5b.pts 03467517/expert_verified/points_label/21a517abc4729e6e352e5d4d2615db5b.seg 03467517 +03467517/points/6554f6429eb7b67585e3c97721f726e4.pts 03467517/expert_verified/points_label/6554f6429eb7b67585e3c97721f726e4.seg 03467517 +02958343/points/f84ba2039d0a4ec5afe717997470b28d.pts 02958343/expert_verified/points_label/f84ba2039d0a4ec5afe717997470b28d.seg 02958343 +02691156/points/29fd29045703ff18b4a8b7176ed97248.pts 02691156/expert_verified/points_label/29fd29045703ff18b4a8b7176ed97248.seg 02691156 +03467517/points/a7f449a1f2cd1f1693f0194265a9746c.pts 03467517/expert_verified/points_label/a7f449a1f2cd1f1693f0194265a9746c.seg 03467517 +03790512/points/7fcee59a33976221a88e8cb97b773125.pts 03790512/expert_verified/points_label/7fcee59a33976221a88e8cb97b773125.seg 03790512 +04099429/points/2407c2684ee757e89c4176ab56cb612.pts 04099429/expert_verified/points_label/2407c2684ee757e89c4176ab56cb612.seg 04099429 +04379243/points/f621e2ad900ad48535836c728d324152.pts 04379243/expert_verified/points_label/f621e2ad900ad48535836c728d324152.seg 04379243 +03001627/points/9a54daea9071a536bf80985a99195eb8.pts 03001627/expert_verified/points_label/9a54daea9071a536bf80985a99195eb8.seg 03001627 +03001627/points/fd9e909b082d8175d319c38340319ae4.pts 03001627/expert_verified/points_label/fd9e909b082d8175d319c38340319ae4.seg 03001627 +03001627/points/a8dd9990ecd74c45435897641a7ee684.pts 03001627/expert_verified/points_label/a8dd9990ecd74c45435897641a7ee684.seg 03001627 +03636649/points/c6424950ca9447627d8864caa856253b.pts 03636649/expert_verified/points_label/c6424950ca9447627d8864caa856253b.seg 03636649 +03948459/points/7f3ec97cfaea31137504cc74f24f0eee.pts 03948459/expert_verified/points_label/7f3ec97cfaea31137504cc74f24f0eee.seg 03948459 +02691156/points/43abe330362164e99be82ec29531a70f.pts 02691156/expert_verified/points_label/43abe330362164e99be82ec29531a70f.seg 02691156 +03001627/points/499c4b519c708ae84cd08aa7c510fb85.pts 03001627/expert_verified/points_label/499c4b519c708ae84cd08aa7c510fb85.seg 03001627 +04379243/points/4c7931492b41f960d50eef20e0914a48.pts 04379243/expert_verified/points_label/4c7931492b41f960d50eef20e0914a48.seg 04379243 
+03001627/points/3f36e261cc87648ac3bd24f986301745.pts 03001627/expert_verified/points_label/3f36e261cc87648ac3bd24f986301745.seg 03001627 +03001627/points/a09a88c11d0b27368821ad3452f1c8c9.pts 03001627/expert_verified/points_label/a09a88c11d0b27368821ad3452f1c8c9.seg 03001627 +04379243/points/89cc879f005dcf50f1f50f6a678fb494.pts 04379243/expert_verified/points_label/89cc879f005dcf50f1f50f6a678fb494.seg 04379243 +02958343/points/d34b0494fc4d756ab927782fc69a1fbb.pts 02958343/expert_verified/points_label/d34b0494fc4d756ab927782fc69a1fbb.seg 02958343 +02958343/points/705840df46a582e2ac826a3c82da491.pts 02958343/expert_verified/points_label/705840df46a582e2ac826a3c82da491.seg 02958343 +02691156/points/74a5f937c22aa08a3e70653c1b3170b5.pts 02691156/expert_verified/points_label/74a5f937c22aa08a3e70653c1b3170b5.seg 02691156 +03948459/points/a0a1633186261a031274aa253a241db2.pts 03948459/expert_verified/points_label/a0a1633186261a031274aa253a241db2.seg 03948459 +03001627/points/2de04227fae28e70b6eb6f056d511fe1.pts 03001627/expert_verified/points_label/2de04227fae28e70b6eb6f056d511fe1.seg 03001627 +02691156/points/1e9ef313876bfba7d02c6d35cc802839.pts 02691156/expert_verified/points_label/1e9ef313876bfba7d02c6d35cc802839.seg 02691156 +03636649/points/e99793b871d27333d42b9650f19dd425.pts 03636649/expert_verified/points_label/e99793b871d27333d42b9650f19dd425.seg 03636649 +03001627/points/7228d43e00af4c1e2746490e2236e9a8.pts 03001627/expert_verified/points_label/7228d43e00af4c1e2746490e2236e9a8.seg 03001627 +03636649/points/66111d2c7a23b0feb404555b84577afb.pts 03636649/expert_verified/points_label/66111d2c7a23b0feb404555b84577afb.seg 03636649 +03001627/points/2499541ace317cbb8cb5d9909aeb1309.pts 03001627/expert_verified/points_label/2499541ace317cbb8cb5d9909aeb1309.seg 03001627 +04379243/points/d151d9f45d8b14536cd661fb5fd95741.pts 04379243/expert_verified/points_label/d151d9f45d8b14536cd661fb5fd95741.seg 04379243 +03001627/points/ea7be2b97e78d5b35a4480134e0cdd21.pts 03001627/expert_verified/points_label/ea7be2b97e78d5b35a4480134e0cdd21.seg 03001627 +02958343/points/9c35f00f81110738783854950b26f0d3.pts 02958343/expert_verified/points_label/9c35f00f81110738783854950b26f0d3.seg 02958343 +03001627/points/e30bd575bbd6c68c9710e093c764abec.pts 03001627/expert_verified/points_label/e30bd575bbd6c68c9710e093c764abec.seg 03001627 +03790512/points/61b17f12bec91d057395d58407f193ba.pts 03790512/expert_verified/points_label/61b17f12bec91d057395d58407f193ba.seg 03790512 +04379243/points/cd895c35fff495cdd0b93fa304cfa755.pts 04379243/expert_verified/points_label/cd895c35fff495cdd0b93fa304cfa755.seg 04379243 +02958343/points/b70d970f8020c25dd141480e2c154d3.pts 02958343/expert_verified/points_label/b70d970f8020c25dd141480e2c154d3.seg 02958343 +04379243/points/2642d805c53e243d629f73b53bd7a234.pts 04379243/expert_verified/points_label/2642d805c53e243d629f73b53bd7a234.seg 04379243 +04379243/points/1bce2f4937d36446a32c566d71fa585c.pts 04379243/expert_verified/points_label/1bce2f4937d36446a32c566d71fa585c.seg 04379243 +04379243/points/7c1bcea89b0037a2d67bd369ec608dad.pts 04379243/expert_verified/points_label/7c1bcea89b0037a2d67bd369ec608dad.seg 04379243 +04379243/points/3154c61c595bd600e56ddd87eb888f65.pts 04379243/expert_verified/points_label/3154c61c595bd600e56ddd87eb888f65.seg 04379243 +03001627/points/7a1de77ca204eaf28a514cac7cb18507.pts 03001627/expert_verified/points_label/7a1de77ca204eaf28a514cac7cb18507.seg 03001627 +04379243/points/77ecc55547840f06d42b9650f19dd425.pts 
04379243/expert_verified/points_label/77ecc55547840f06d42b9650f19dd425.seg 04379243 +02691156/points/9a8aecab136ce50db7ef47444625afb2.pts 02691156/expert_verified/points_label/9a8aecab136ce50db7ef47444625afb2.seg 02691156 +02958343/points/24866846d728484e1d1a964dea8a7aab.pts 02958343/expert_verified/points_label/24866846d728484e1d1a964dea8a7aab.seg 02958343 +04099429/points/9b75297c580ff937b61ce5beb9f92726.pts 04099429/expert_verified/points_label/9b75297c580ff937b61ce5beb9f92726.seg 04099429 +04225987/points/90dbe261a4d56dcf1082f2ea630bf69e.pts 04225987/expert_verified/points_label/90dbe261a4d56dcf1082f2ea630bf69e.seg 04225987 +03001627/points/81b27636162e148bb3fb065fa3089331.pts 03001627/expert_verified/points_label/81b27636162e148bb3fb065fa3089331.seg 03001627 +03642806/points/66d47a84a3d522dc9311bf79d4774e73.pts 03642806/expert_verified/points_label/66d47a84a3d522dc9311bf79d4774e73.seg 03642806 +03001627/points/2a05ae00b701fda36567137a59cb1a56.pts 03001627/expert_verified/points_label/2a05ae00b701fda36567137a59cb1a56.seg 03001627 +04379243/points/79df23303a3192c1cdf1dfd78f33901b.pts 04379243/expert_verified/points_label/79df23303a3192c1cdf1dfd78f33901b.seg 04379243 +04379243/points/bf17779bec6abccf161bc5243aab8ea4.pts 04379243/expert_verified/points_label/bf17779bec6abccf161bc5243aab8ea4.seg 04379243 +03001627/points/ece1a921c1bfd44947f5e245ee376525.pts 03001627/expert_verified/points_label/ece1a921c1bfd44947f5e245ee376525.seg 03001627 +03636649/points/15c51ecb58bf304fef3a2c64cef919d0.pts 03636649/expert_verified/points_label/15c51ecb58bf304fef3a2c64cef919d0.seg 03636649 +04379243/points/5d93e285b2006520ab610b0c94236463.pts 04379243/expert_verified/points_label/5d93e285b2006520ab610b0c94236463.seg 04379243 +03636649/points/b2d5929e66044aeac7db9c21ccfbc4a1.pts 03636649/expert_verified/points_label/b2d5929e66044aeac7db9c21ccfbc4a1.seg 03636649 +04379243/points/f3164e1781a296597f6f00dc967c386.pts 04379243/expert_verified/points_label/f3164e1781a296597f6f00dc967c386.seg 04379243 +04379243/points/798a07e42d76013582695d8aaeacccc5.pts 04379243/expert_verified/points_label/798a07e42d76013582695d8aaeacccc5.seg 04379243 +03948459/points/cc014e78b5cd9e7ed957eaf7f4edb205.pts 03948459/expert_verified/points_label/cc014e78b5cd9e7ed957eaf7f4edb205.seg 03948459 +03636649/points/b3a98808fb1ccd892a5041fadf25a502.pts 03636649/expert_verified/points_label/b3a98808fb1ccd892a5041fadf25a502.seg 03636649 +04379243/points/9472c006a5d35b9ab606ece4189242ff.pts 04379243/expert_verified/points_label/9472c006a5d35b9ab606ece4189242ff.seg 04379243 +03001627/points/3f04adffb69b5ebee95cd0dc8c2f0e83.pts 03001627/expert_verified/points_label/3f04adffb69b5ebee95cd0dc8c2f0e83.seg 03001627 +03001627/points/26aa22bd1da8b8c5b1a5c6ecbc81953c.pts 03001627/expert_verified/points_label/26aa22bd1da8b8c5b1a5c6ecbc81953c.seg 03001627 +03001627/points/f68ecc9ec512915f36d8dd30a594b2af.pts 03001627/expert_verified/points_label/f68ecc9ec512915f36d8dd30a594b2af.seg 03001627 +03642806/points/6489453e322cdb53f9f3c6290096f50f.pts 03642806/expert_verified/points_label/6489453e322cdb53f9f3c6290096f50f.seg 03642806 +03001627/points/c53fa6829ec9a947d13b7d13ee32497.pts 03001627/expert_verified/points_label/c53fa6829ec9a947d13b7d13ee32497.seg 03001627 +04379243/points/7f1bd688960e2c1b97f2016c3d6097c9.pts 04379243/expert_verified/points_label/7f1bd688960e2c1b97f2016c3d6097c9.seg 04379243 +02958343/points/edb2ab8a1d7e20f36436916a86a90ed7.pts 02958343/expert_verified/points_label/edb2ab8a1d7e20f36436916a86a90ed7.seg 02958343 
+04379243/points/159a2a760327ca5bababe11fcea8796.pts 04379243/expert_verified/points_label/159a2a760327ca5bababe11fcea8796.seg 04379243 +02958343/points/988108a7536d686824065b218dc1b5b9.pts 02958343/expert_verified/points_label/988108a7536d686824065b218dc1b5b9.seg 02958343 +03636649/points/c695408a86062c4d242ea50288b3f64.pts 03636649/expert_verified/points_label/c695408a86062c4d242ea50288b3f64.seg 03636649 +04379243/points/2e7cb2cbfbbb4d002ee19ebe356c2dcb.pts 04379243/expert_verified/points_label/2e7cb2cbfbbb4d002ee19ebe356c2dcb.seg 04379243 +02691156/points/3d23703a618ce7df1e569ed4e4cfe84.pts 02691156/expert_verified/points_label/3d23703a618ce7df1e569ed4e4cfe84.seg 02691156 +03636649/points/97b7d9aabe38f91df11c97be803c47d.pts 03636649/expert_verified/points_label/97b7d9aabe38f91df11c97be803c47d.seg 03636649 +04379243/points/5be1589df948b227c955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/5be1589df948b227c955e5ed03ef3a2f.seg 04379243 +04379243/points/8ea7ca2c8b48eb68ab610b0c94236463.pts 04379243/expert_verified/points_label/8ea7ca2c8b48eb68ab610b0c94236463.seg 04379243 +02958343/points/eb56379e243b0e2090da6b3e2ed8b49d.pts 02958343/expert_verified/points_label/eb56379e243b0e2090da6b3e2ed8b49d.seg 02958343 +03001627/points/cc30a723aeba69a139e0f39f5249b0ba.pts 03001627/expert_verified/points_label/cc30a723aeba69a139e0f39f5249b0ba.seg 03001627 +03001627/points/ff8efd10f5e6c5c7c6c0380e62f2644.pts 03001627/expert_verified/points_label/ff8efd10f5e6c5c7c6c0380e62f2644.seg 03001627 +02691156/points/d0ee4253d406b3f05e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/d0ee4253d406b3f05e9e2656aff7dd5b.seg 02691156 +02691156/points/9afe827a622d8ca28699933784576e73.pts 02691156/expert_verified/points_label/9afe827a622d8ca28699933784576e73.seg 02691156 +03467517/points/d82fc6db200cdf6ea24eeca91f583600.pts 03467517/expert_verified/points_label/d82fc6db200cdf6ea24eeca91f583600.seg 03467517 +03642806/points/6123321e3af0b6328204b359ccd3949e.pts 03642806/expert_verified/points_label/6123321e3af0b6328204b359ccd3949e.seg 03642806 +03636649/points/e15defcb3dd448094fffb007974c9976.pts 03636649/expert_verified/points_label/e15defcb3dd448094fffb007974c9976.seg 03636649 +03001627/points/c7fe45610d10cb108ad3a7d07aac2767.pts 03001627/expert_verified/points_label/c7fe45610d10cb108ad3a7d07aac2767.seg 03001627 +04379243/points/bfaa1c23d2622422ad16260d4d73b56.pts 04379243/expert_verified/points_label/bfaa1c23d2622422ad16260d4d73b56.seg 04379243 +04379243/points/8e3fc5f1f8e9658ce8b2b8dc0c816caf.pts 04379243/expert_verified/points_label/8e3fc5f1f8e9658ce8b2b8dc0c816caf.seg 04379243 +03467517/points/1a96f73d0929bd4793f0194265a9746c.pts 03467517/expert_verified/points_label/1a96f73d0929bd4793f0194265a9746c.seg 03467517 +02691156/points/86b11ae736659136ca8607f540cc62ba.pts 02691156/expert_verified/points_label/86b11ae736659136ca8607f540cc62ba.seg 02691156 +04379243/points/4c4c719ac4b61d8f812c9aaa38f9a422.pts 04379243/expert_verified/points_label/4c4c719ac4b61d8f812c9aaa38f9a422.seg 04379243 +04379243/points/443eca86041e57ab1e99b149cff6a230.pts 04379243/expert_verified/points_label/443eca86041e57ab1e99b149cff6a230.seg 04379243 +03948459/points/6b2d89a7f2b173f0d9deb3f829cc2475.pts 03948459/expert_verified/points_label/6b2d89a7f2b173f0d9deb3f829cc2475.seg 03948459 +04379243/points/8d84471c4af977d917271868b642acd3.pts 04379243/expert_verified/points_label/8d84471c4af977d917271868b642acd3.seg 04379243 +03636649/points/b78bef16d4f44844931e98da3a93e73e.pts 
03636649/expert_verified/points_label/b78bef16d4f44844931e98da3a93e73e.seg 03636649 +03636649/points/29985e44b73051d923500a5b036df62e.pts 03636649/expert_verified/points_label/29985e44b73051d923500a5b036df62e.seg 03636649 +03642806/points/4f3575df3821e08c466909b3e9553909.pts 03642806/expert_verified/points_label/4f3575df3821e08c466909b3e9553909.seg 03642806 +03001627/points/3774a2b8c71e70b9f18a36d57b7cced0.pts 03001627/expert_verified/points_label/3774a2b8c71e70b9f18a36d57b7cced0.seg 03001627 +03001627/points/3ea40a75f22515557dcf230d8b7d162e.pts 03001627/expert_verified/points_label/3ea40a75f22515557dcf230d8b7d162e.seg 03001627 +03001627/points/33c4f94e97c3fefd19fb4103277a6b93.pts 03001627/expert_verified/points_label/33c4f94e97c3fefd19fb4103277a6b93.seg 03001627 +03636649/points/d7760d5f9e1e6a622cd2160e449d45ae.pts 03636649/expert_verified/points_label/d7760d5f9e1e6a622cd2160e449d45ae.seg 03636649 +02954340/points/7f9ddfff396634f17790cd6f6e8952aa.pts 02954340/expert_verified/points_label/7f9ddfff396634f17790cd6f6e8952aa.seg 02954340 +03001627/points/5e706e87ca60bd19ecb01bc908e8cea6.pts 03001627/expert_verified/points_label/5e706e87ca60bd19ecb01bc908e8cea6.seg 03001627 +04379243/points/90c19c729cabdb864b8710a3469971b1.pts 04379243/expert_verified/points_label/90c19c729cabdb864b8710a3469971b1.seg 04379243 +02691156/points/d08471df3e76602427743256ca3834f.pts 02691156/expert_verified/points_label/d08471df3e76602427743256ca3834f.seg 02691156 +02958343/points/67c229c70e64a25e69c2e0a91b39f742.pts 02958343/expert_verified/points_label/67c229c70e64a25e69c2e0a91b39f742.seg 02958343 +04379243/points/1011e1c9812b84d2a9ed7bb5b55809f8.pts 04379243/expert_verified/points_label/1011e1c9812b84d2a9ed7bb5b55809f8.seg 04379243 +03636649/points/3e2d51c40b37c9c086052e834fbd2c4a.pts 03636649/expert_verified/points_label/3e2d51c40b37c9c086052e834fbd2c4a.seg 03636649 +03001627/points/6b385a32489bab4abbc7a6acbd8f058b.pts 03001627/expert_verified/points_label/6b385a32489bab4abbc7a6acbd8f058b.seg 03001627 +03001627/points/61d29e8133da0b58d1fd43e2bf80195.pts 03001627/expert_verified/points_label/61d29e8133da0b58d1fd43e2bf80195.seg 03001627 +04379243/points/d5f2968e4b7254ccf4104961857ca9c.pts 04379243/expert_verified/points_label/d5f2968e4b7254ccf4104961857ca9c.seg 04379243 +04379243/points/30c9865cfc4294a7ad16260d4d73b56.pts 04379243/expert_verified/points_label/30c9865cfc4294a7ad16260d4d73b56.seg 04379243 +03001627/points/76919a456a23b9779368d1198f406e7.pts 03001627/expert_verified/points_label/76919a456a23b9779368d1198f406e7.seg 03001627 +03001627/points/c12da8acb2c7973597e755dddca14449.pts 03001627/expert_verified/points_label/c12da8acb2c7973597e755dddca14449.seg 03001627 +02958343/points/a5dcd1196a1ffa9739f20966eb25504f.pts 02958343/expert_verified/points_label/a5dcd1196a1ffa9739f20966eb25504f.seg 02958343 +02691156/points/1deb997079e0b3cd6c1cd53dbc9f7b8e.pts 02691156/expert_verified/points_label/1deb997079e0b3cd6c1cd53dbc9f7b8e.seg 02691156 +03636649/points/afb7cc3bbc3595a4e9b3dff83c7ff715.pts 03636649/expert_verified/points_label/afb7cc3bbc3595a4e9b3dff83c7ff715.seg 03636649 +03636649/points/b4aee889d5e2a826f6747912091f1965.pts 03636649/expert_verified/points_label/b4aee889d5e2a826f6747912091f1965.seg 03636649 +03636649/points/ea71ba1d8d8c8e5888a1de3dc61bfeef.pts 03636649/expert_verified/points_label/ea71ba1d8d8c8e5888a1de3dc61bfeef.seg 03636649 +02958343/points/b0c2225ab347e28f1a48cf85d161a723.pts 02958343/expert_verified/points_label/b0c2225ab347e28f1a48cf85d161a723.seg 02958343 
+03001627/points/1ab8a3b55c14a7b27eaeab1f0c9120b7.pts 03001627/expert_verified/points_label/1ab8a3b55c14a7b27eaeab1f0c9120b7.seg 03001627 +03261776/points/c6d19db35f69bae7b6d9c2cee7f2f72b.pts 03261776/expert_verified/points_label/c6d19db35f69bae7b6d9c2cee7f2f72b.seg 03261776 +03001627/points/6d6e634ff34bd350c511e6b9b3b344f3.pts 03001627/expert_verified/points_label/6d6e634ff34bd350c511e6b9b3b344f3.seg 03001627 +02691156/points/ce682d7a2bbf77b6fc4b92d3d335214a.pts 02691156/expert_verified/points_label/ce682d7a2bbf77b6fc4b92d3d335214a.seg 02691156 +03261776/points/943048e64cc2bc980a070963925e308.pts 03261776/expert_verified/points_label/943048e64cc2bc980a070963925e308.seg 03261776 +03642806/points/5a63c5f29f0bc0eb12d8efb2f101da03.pts 03642806/expert_verified/points_label/5a63c5f29f0bc0eb12d8efb2f101da03.seg 03642806 +04379243/points/19678fdb9bc926505e4b35ff1ea95f37.pts 04379243/expert_verified/points_label/19678fdb9bc926505e4b35ff1ea95f37.seg 04379243 +02958343/points/52f2a2472411fe2e6b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/52f2a2472411fe2e6b418c7d9fedcaa9.seg 02958343 +03001627/points/1ee92a9d78cccbda98d2e7dbe701ca48.pts 03001627/expert_verified/points_label/1ee92a9d78cccbda98d2e7dbe701ca48.seg 03001627 +03001627/points/795f38ce5d8519938077cafed2bb8242.pts 03001627/expert_verified/points_label/795f38ce5d8519938077cafed2bb8242.seg 03001627 +03001627/points/5e5121cc58c4fea78ce66f12ba927a2b.pts 03001627/expert_verified/points_label/5e5121cc58c4fea78ce66f12ba927a2b.seg 03001627 +03001627/points/b998016472e9dd7a9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/b998016472e9dd7a9b9f2eb77f5e247e.seg 03001627 +04379243/points/30b506e5e1fc282afdfcfddf24fb29ec.pts 04379243/expert_verified/points_label/30b506e5e1fc282afdfcfddf24fb29ec.seg 04379243 +03624134/points/bcd7ed830358dbd6d58ea69ee1ced10e.pts 03624134/expert_verified/points_label/bcd7ed830358dbd6d58ea69ee1ced10e.seg 03624134 +03001627/points/40d202afdcc49c6d35836c728d324152.pts 03001627/expert_verified/points_label/40d202afdcc49c6d35836c728d324152.seg 03001627 +03467517/points/fdb74c27462dfd837c481698bd5233b4.pts 03467517/expert_verified/points_label/fdb74c27462dfd837c481698bd5233b4.seg 03467517 +02691156/points/dc7c5d12854b9467b96212c8f6cd06e.pts 02691156/expert_verified/points_label/dc7c5d12854b9467b96212c8f6cd06e.seg 02691156 +02691156/points/48e9c61de4db838d84b83051fa0ae5d2.pts 02691156/expert_verified/points_label/48e9c61de4db838d84b83051fa0ae5d2.seg 02691156 +04379243/points/d187561a6b0cbd0acaed5ce7390f30b7.pts 04379243/expert_verified/points_label/d187561a6b0cbd0acaed5ce7390f30b7.seg 04379243 +04379243/points/ae9e04d050f5cba1492d9da2668ec34c.pts 04379243/expert_verified/points_label/ae9e04d050f5cba1492d9da2668ec34c.seg 04379243 +04379243/points/72c884f3b9b9119966f379f51753f72b.pts 04379243/expert_verified/points_label/72c884f3b9b9119966f379f51753f72b.seg 04379243 +02691156/points/917694a71164f2148e8405d6c51a908.pts 02691156/expert_verified/points_label/917694a71164f2148e8405d6c51a908.seg 02691156 +03001627/points/a2441f03fed7c13def31f91fe6afc8fa.pts 03001627/expert_verified/points_label/a2441f03fed7c13def31f91fe6afc8fa.seg 03001627 +03001627/points/49c955a80749d2e1a5ffdf44ff86b795.pts 03001627/expert_verified/points_label/49c955a80749d2e1a5ffdf44ff86b795.seg 03001627 +03636649/points/c43c89d862e10552b24ecc319936dfe2.pts 03636649/expert_verified/points_label/c43c89d862e10552b24ecc319936dfe2.seg 03636649 +03636649/points/e5ff9311bee487f5ca4aaad7dc0e3a16.pts 
03636649/expert_verified/points_label/e5ff9311bee487f5ca4aaad7dc0e3a16.seg 03636649 +02958343/points/ba0ac1d1e25d3fad63f2c3a55558a78f.pts 02958343/expert_verified/points_label/ba0ac1d1e25d3fad63f2c3a55558a78f.seg 02958343 +04379243/points/2f58b1ca8634a6b48b9b51ae4415d5aa.pts 04379243/expert_verified/points_label/2f58b1ca8634a6b48b9b51ae4415d5aa.seg 04379243 +03001627/points/c585ee093bfd52af6512b7b24f3d84.pts 03001627/expert_verified/points_label/c585ee093bfd52af6512b7b24f3d84.seg 03001627 +03001627/points/46f6a6e0f239282fc8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/46f6a6e0f239282fc8687ff9b0b4e4ac.seg 03001627 +03642806/points/f72dc1ffeae0168aadcfd37206a0d18b.pts 03642806/expert_verified/points_label/f72dc1ffeae0168aadcfd37206a0d18b.seg 03642806 +03948459/points/1e83ef6ed5d0b78b7efb854782e23566.pts 03948459/expert_verified/points_label/1e83ef6ed5d0b78b7efb854782e23566.seg 03948459 +03001627/points/95e5f6e550761aefe65b629e4a22f51e.pts 03001627/expert_verified/points_label/95e5f6e550761aefe65b629e4a22f51e.seg 03001627 +03001627/points/b38d05caee69c7ac8fc6229eb64e56a.pts 03001627/expert_verified/points_label/b38d05caee69c7ac8fc6229eb64e56a.seg 03001627 +02691156/points/4ff50b9f815c58acca8607f540cc62ba.pts 02691156/expert_verified/points_label/4ff50b9f815c58acca8607f540cc62ba.seg 02691156 +03636649/points/78a11c0b8e964c9b41657e31b569b105.pts 03636649/expert_verified/points_label/78a11c0b8e964c9b41657e31b569b105.seg 03636649 +02958343/points/b1f75a8e8b9e921a8a6cf8c6b92417f2.pts 02958343/expert_verified/points_label/b1f75a8e8b9e921a8a6cf8c6b92417f2.seg 02958343 +02958343/points/a836fc66c01eccca58c27e607f6e2d4c.pts 02958343/expert_verified/points_label/a836fc66c01eccca58c27e607f6e2d4c.seg 02958343 +02691156/points/fac4af109beb0108b4f192eea1889928.pts 02691156/expert_verified/points_label/fac4af109beb0108b4f192eea1889928.seg 02691156 +03467517/points/b9c10bf6fc2095f93f0194265a9746c.pts 03467517/expert_verified/points_label/b9c10bf6fc2095f93f0194265a9746c.seg 03467517 +02691156/points/b976a48c015d6ced5e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/b976a48c015d6ced5e9e2656aff7dd5b.seg 02691156 +04379243/points/889f48aa85accd2ee73947fdf756a329.pts 04379243/expert_verified/points_label/889f48aa85accd2ee73947fdf756a329.seg 04379243 +02691156/points/b6d61068ef2bf2d46059aeb39e538eb2.pts 02691156/expert_verified/points_label/b6d61068ef2bf2d46059aeb39e538eb2.seg 02691156 +04379243/points/d94de64641651a2079b3e1be3524f72f.pts 04379243/expert_verified/points_label/d94de64641651a2079b3e1be3524f72f.seg 04379243 +03001627/points/117bd6da01905949a81116f5456ee312.pts 03001627/expert_verified/points_label/117bd6da01905949a81116f5456ee312.seg 03001627 +03636649/points/845542d0f578a9db1ec48bc3c478566d.pts 03636649/expert_verified/points_label/845542d0f578a9db1ec48bc3c478566d.seg 03636649 +04379243/points/9391dcc782fa7f6bfdad344760a9dafd.pts 04379243/expert_verified/points_label/9391dcc782fa7f6bfdad344760a9dafd.seg 04379243 +04379243/points/fe99a1127734f7852b70eac6546e93fd.pts 04379243/expert_verified/points_label/fe99a1127734f7852b70eac6546e93fd.seg 04379243 +03001627/points/4e358c2dc0513971f98c0761af40e04.pts 03001627/expert_verified/points_label/4e358c2dc0513971f98c0761af40e04.seg 03001627 +03636649/points/53afad2e573b26b141657e31b569b105.pts 03636649/expert_verified/points_label/53afad2e573b26b141657e31b569b105.seg 03636649 +04379243/points/3e51742cb382aa1f79b3e1be3524f72f.pts 04379243/expert_verified/points_label/3e51742cb382aa1f79b3e1be3524f72f.seg 04379243 
+02958343/points/4f17af1ca7ae689d409b2c4484d833cc.pts 02958343/expert_verified/points_label/4f17af1ca7ae689d409b2c4484d833cc.seg 02958343 +03467517/points/c739664436ac5237aa0c867d5b070a5d.pts 03467517/expert_verified/points_label/c739664436ac5237aa0c867d5b070a5d.seg 03467517 +03797390/points/61c10dccfa8e508e2d66cbf6a91063.pts 03797390/expert_verified/points_label/61c10dccfa8e508e2d66cbf6a91063.seg 03797390 +03467517/points/aa86d20d03b2303593f0194265a9746c.pts 03467517/expert_verified/points_label/aa86d20d03b2303593f0194265a9746c.seg 03467517 +04379243/points/2f98d5e721e84debaa8081a7009091db.pts 04379243/expert_verified/points_label/2f98d5e721e84debaa8081a7009091db.seg 04379243 +04379243/points/2a0f853dadd841f96f1e07a56c129dfc.pts 04379243/expert_verified/points_label/2a0f853dadd841f96f1e07a56c129dfc.seg 04379243 +03001627/points/8031478c3fe31ddcc337647acafe65f0.pts 03001627/expert_verified/points_label/8031478c3fe31ddcc337647acafe65f0.seg 03001627 +03636649/points/a53112591be182b9d93768e7b9b1eabf.pts 03636649/expert_verified/points_label/a53112591be182b9d93768e7b9b1eabf.seg 03636649 +03001627/points/5bc916f8b9d0a7c6b40f0ac0fb9a650d.pts 03001627/expert_verified/points_label/5bc916f8b9d0a7c6b40f0ac0fb9a650d.seg 03001627 +02691156/points/f2d4b8440d4bde5330afbcb38d77d0c3.pts 02691156/expert_verified/points_label/f2d4b8440d4bde5330afbcb38d77d0c3.seg 02691156 +03001627/points/e4274fc2b9e4a5511882515d09f3979e.pts 03001627/expert_verified/points_label/e4274fc2b9e4a5511882515d09f3979e.seg 03001627 +03001627/points/9ab18a33335373b2659dda512294c744.pts 03001627/expert_verified/points_label/9ab18a33335373b2659dda512294c744.seg 03001627 +04379243/points/32ea6609eb659a2cec3367bccf60e518.pts 04379243/expert_verified/points_label/32ea6609eb659a2cec3367bccf60e518.seg 04379243 +04379243/points/759cb93134fd5efde76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/759cb93134fd5efde76bc197b3a3ffc0.seg 04379243 +03001627/points/b8b5e172ee58899df2d9e72ba502035.pts 03001627/expert_verified/points_label/b8b5e172ee58899df2d9e72ba502035.seg 03001627 +03001627/points/1886b3e3f3d4af3ace522e6dda26fb51.pts 03001627/expert_verified/points_label/1886b3e3f3d4af3ace522e6dda26fb51.seg 03001627 +03948459/points/3f5f657bec9a21814ce6ac98dc4781fe.pts 03948459/expert_verified/points_label/3f5f657bec9a21814ce6ac98dc4781fe.seg 03948459 +04379243/points/5adf5a7173e588ad76e9713f57a5fcb6.pts 04379243/expert_verified/points_label/5adf5a7173e588ad76e9713f57a5fcb6.seg 04379243 +03001627/points/f33b6f791e9d64387d01b77e04a0bc7b.pts 03001627/expert_verified/points_label/f33b6f791e9d64387d01b77e04a0bc7b.seg 03001627 +04379243/points/4e928377ae98ed8d99e8bf807e902261.pts 04379243/expert_verified/points_label/4e928377ae98ed8d99e8bf807e902261.seg 04379243 +03001627/points/d7867d215f52107ba5e8cf3aa1686d66.pts 03001627/expert_verified/points_label/d7867d215f52107ba5e8cf3aa1686d66.seg 03001627 +02691156/points/bddc2c1a4fae008947a1dbf5fd48a4dd.pts 02691156/expert_verified/points_label/bddc2c1a4fae008947a1dbf5fd48a4dd.seg 02691156 +02958343/points/bafacc7f28509d4157abc6fa0d632bc7.pts 02958343/expert_verified/points_label/bafacc7f28509d4157abc6fa0d632bc7.seg 02958343 +02691156/points/a14b262838529c2c81e1d9f6b27f1a92.pts 02691156/expert_verified/points_label/a14b262838529c2c81e1d9f6b27f1a92.seg 02691156 +03001627/points/38afa26a419ea3abed040525648fc6d7.pts 03001627/expert_verified/points_label/38afa26a419ea3abed040525648fc6d7.seg 03001627 +04379243/points/79f63a1564928af071a782a4379556c7.pts 
04379243/expert_verified/points_label/79f63a1564928af071a782a4379556c7.seg 04379243 +04379243/points/cbd1cd9b5423f890beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/cbd1cd9b5423f890beedb4c8fd29e2d1.seg 04379243 +02691156/points/d74767519393a937f73e5bc170b7e2be.pts 02691156/expert_verified/points_label/d74767519393a937f73e5bc170b7e2be.seg 02691156 +03001627/points/9a82269e56737217e16571f1d370cad9.pts 03001627/expert_verified/points_label/9a82269e56737217e16571f1d370cad9.seg 03001627 +03001627/points/6e1e73e14637a28da1c367d7a459a9b7.pts 03001627/expert_verified/points_label/6e1e73e14637a28da1c367d7a459a9b7.seg 03001627 +03797390/points/eecb13f61a93b4048f58d8b19de93f99.pts 03797390/expert_verified/points_label/eecb13f61a93b4048f58d8b19de93f99.seg 03797390 +03001627/points/4f7523a3d276bfae4b3c42e318f3affc.pts 03001627/expert_verified/points_label/4f7523a3d276bfae4b3c42e318f3affc.seg 03001627 +03624134/points/f19fe19693937db1cb03b57fca000b1f.pts 03624134/expert_verified/points_label/f19fe19693937db1cb03b57fca000b1f.seg 03624134 +02958343/points/c3858a8b73dcb137e3bdba9430565083.pts 02958343/expert_verified/points_label/c3858a8b73dcb137e3bdba9430565083.seg 02958343 +04379243/points/3ce930bb150aef8a69fb38085fbc320c.pts 04379243/expert_verified/points_label/3ce930bb150aef8a69fb38085fbc320c.seg 04379243 +04379243/points/75e3cbf4b1ef0df971a782a4379556c7.pts 04379243/expert_verified/points_label/75e3cbf4b1ef0df971a782a4379556c7.seg 04379243 +04379243/points/5040f8f3e2293db448e116352760c52d.pts 04379243/expert_verified/points_label/5040f8f3e2293db448e116352760c52d.seg 04379243 +04379243/points/edaf24be15738ea2c5d1c45cadcaa3eb.pts 04379243/expert_verified/points_label/edaf24be15738ea2c5d1c45cadcaa3eb.seg 04379243 +04379243/points/6fb52c296531dc17beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/6fb52c296531dc17beedb4c8fd29e2d1.seg 04379243 +04379243/points/e777df6ffb40e3a1853d412328e7e7a6.pts 04379243/expert_verified/points_label/e777df6ffb40e3a1853d412328e7e7a6.seg 04379243 +03001627/points/9c103621101bcf9919fb4103277a6b93.pts 03001627/expert_verified/points_label/9c103621101bcf9919fb4103277a6b93.seg 03001627 +03001627/points/5d20adaf6d8f89fa2f1c10544d7d6f.pts 03001627/expert_verified/points_label/5d20adaf6d8f89fa2f1c10544d7d6f.seg 03001627 +02691156/points/b80bd34ab330babbc8727b27ee96a4b7.pts 02691156/expert_verified/points_label/b80bd34ab330babbc8727b27ee96a4b7.seg 02691156 +04379243/points/50d898f6d1c05cee2d99129afd32edf4.pts 04379243/expert_verified/points_label/50d898f6d1c05cee2d99129afd32edf4.seg 04379243 +04379243/points/c0c836c630cdb4bb664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/c0c836c630cdb4bb664b3b9b23ddfcbc.seg 04379243 +03790512/points/a1553e0bb7897a7ace0bf41e5f45753d.pts 03790512/expert_verified/points_label/a1553e0bb7897a7ace0bf41e5f45753d.seg 03790512 +03467517/points/7701180906a0aa156a7ae841f1f88f87.pts 03467517/expert_verified/points_label/7701180906a0aa156a7ae841f1f88f87.seg 03467517 +03467517/points/3ef569c13f4ab5f83ac61a2f8346a8f.pts 03467517/expert_verified/points_label/3ef569c13f4ab5f83ac61a2f8346a8f.seg 03467517 +03636649/points/3834d7f376879c03eca29403b7226aa1.pts 03636649/expert_verified/points_label/3834d7f376879c03eca29403b7226aa1.seg 03636649 +02958343/points/34ab29cea66952f16f48edd113a40fce.pts 02958343/expert_verified/points_label/34ab29cea66952f16f48edd113a40fce.seg 02958343 +02958343/points/e24f388736f4e6fd2cdd250493632937.pts 02958343/expert_verified/points_label/e24f388736f4e6fd2cdd250493632937.seg 02958343 
+03001627/points/3ae022522800685c610195e4fb10d1de.pts 03001627/expert_verified/points_label/3ae022522800685c610195e4fb10d1de.seg 03001627 +02691156/points/49660fd24e5c2fbab87697d3904b168b.pts 02691156/expert_verified/points_label/49660fd24e5c2fbab87697d3904b168b.seg 02691156 +03642806/points/2d5d4d79cd464298566636e42679cc7f.pts 03642806/expert_verified/points_label/2d5d4d79cd464298566636e42679cc7f.seg 03642806 +04379243/points/7988dedacce42552ab610b0c94236463.pts 04379243/expert_verified/points_label/7988dedacce42552ab610b0c94236463.seg 04379243 +04379243/points/91ed62f2b3fd5919f12d7184a2ad3430.pts 04379243/expert_verified/points_label/91ed62f2b3fd5919f12d7184a2ad3430.seg 04379243 +03001627/points/a5898fefb1733333a82b0d8d157287f5.pts 03001627/expert_verified/points_label/a5898fefb1733333a82b0d8d157287f5.seg 03001627 +04379243/points/b4ef1de99422b08768661782af60b711.pts 04379243/expert_verified/points_label/b4ef1de99422b08768661782af60b711.seg 04379243 +03001627/points/df2b7e697ab6ca0f155d75bbf62b80.pts 03001627/expert_verified/points_label/df2b7e697ab6ca0f155d75bbf62b80.seg 03001627 +03467517/points/408a8e1b51266b9ccc34b900bb2492e.pts 03467517/expert_verified/points_label/408a8e1b51266b9ccc34b900bb2492e.seg 03467517 +03001627/points/597f2b2153af0c544aabcf2a7cb640f9.pts 03001627/expert_verified/points_label/597f2b2153af0c544aabcf2a7cb640f9.seg 03001627 +03001627/points/6870fbd4a7b733b0674f1c30a8cad95a.pts 03001627/expert_verified/points_label/6870fbd4a7b733b0674f1c30a8cad95a.seg 03001627 +03001627/points/e35d7d19dcdc9e5c30e06a011e63236a.pts 03001627/expert_verified/points_label/e35d7d19dcdc9e5c30e06a011e63236a.seg 03001627 +04225987/points/58ade10f7f87edc6e860048d7ced02e3.pts 04225987/expert_verified/points_label/58ade10f7f87edc6e860048d7ced02e3.seg 04225987 +04379243/points/39cf5ae2b497715a84253b2030fab070.pts 04379243/expert_verified/points_label/39cf5ae2b497715a84253b2030fab070.seg 04379243 +04379243/points/ab7b0db92f96381f8cbb8bac2032149c.pts 04379243/expert_verified/points_label/ab7b0db92f96381f8cbb8bac2032149c.seg 04379243 +03001627/points/b117b01ab380362db8134b0fbf68257d.pts 03001627/expert_verified/points_label/b117b01ab380362db8134b0fbf68257d.seg 03001627 +03467517/points/913f3c90f5b78256e98e318d424a4bb9.pts 03467517/expert_verified/points_label/913f3c90f5b78256e98e318d424a4bb9.seg 03467517 +04379243/points/831985fb385a5b2a9ae2d75b4fc35b7.pts 04379243/expert_verified/points_label/831985fb385a5b2a9ae2d75b4fc35b7.seg 04379243 +03467517/points/482b8b9a225b6ca1d57700c05b1862d8.pts 03467517/expert_verified/points_label/482b8b9a225b6ca1d57700c05b1862d8.seg 03467517 +03001627/points/93a6876247c7a015d84b8ba651dfb8ac.pts 03001627/expert_verified/points_label/93a6876247c7a015d84b8ba651dfb8ac.seg 03001627 +04379243/points/a78273aa10b2dfb0bc8d334f99e7f52.pts 04379243/expert_verified/points_label/a78273aa10b2dfb0bc8d334f99e7f52.seg 04379243 +04379243/points/3c686ac317c496f9a71c812e027f94d9.pts 04379243/expert_verified/points_label/3c686ac317c496f9a71c812e027f94d9.seg 04379243 +02691156/points/50755e616df58fe566cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/50755e616df58fe566cf1b4a8fc3914e.seg 02691156 +03001627/points/8cedc8e684d60ff42a06d8c81262ef96.pts 03001627/expert_verified/points_label/8cedc8e684d60ff42a06d8c81262ef96.seg 03001627 +04379243/points/f74c321042dbc8e684d78f017ff73fd6.pts 04379243/expert_verified/points_label/f74c321042dbc8e684d78f017ff73fd6.seg 04379243 +02958343/points/5130947e5f18e73a8321b7d65a99d2a.pts 
02958343/expert_verified/points_label/5130947e5f18e73a8321b7d65a99d2a.seg 02958343 +03261776/points/f5d210ff14ca9d29b6d9c2cee7f2f72b.pts 03261776/expert_verified/points_label/f5d210ff14ca9d29b6d9c2cee7f2f72b.seg 03261776 +03001627/points/d36de0f850783d8fd6b3090036b71698.pts 03001627/expert_verified/points_label/d36de0f850783d8fd6b3090036b71698.seg 03001627 +03001627/points/6897c2665267cca39eea64ae4d2b4158.pts 03001627/expert_verified/points_label/6897c2665267cca39eea64ae4d2b4158.seg 03001627 +03001627/points/6e98c5d61e008b4c2871cc0b3cc1a485.pts 03001627/expert_verified/points_label/6e98c5d61e008b4c2871cc0b3cc1a485.seg 03001627 +02958343/points/92f697d036addb55ed576c2966428f.pts 02958343/expert_verified/points_label/92f697d036addb55ed576c2966428f.seg 02958343 +04379243/points/f3fd419f725aa894ba5342d638d0c267.pts 04379243/expert_verified/points_label/f3fd419f725aa894ba5342d638d0c267.seg 04379243 +04379243/points/62eff79cf2e75bc2765ee729adbdf968.pts 04379243/expert_verified/points_label/62eff79cf2e75bc2765ee729adbdf968.seg 04379243 +03001627/points/98a1f8651c962402492d9da2668ec34c.pts 03001627/expert_verified/points_label/98a1f8651c962402492d9da2668ec34c.seg 03001627 +03636649/points/d90639e69c82f864eb2d9895648d1206.pts 03636649/expert_verified/points_label/d90639e69c82f864eb2d9895648d1206.seg 03636649 +02954340/points/a1494210f6774b87b3e0e60b857dde8f.pts 02954340/expert_verified/points_label/a1494210f6774b87b3e0e60b857dde8f.seg 02954340 +03467517/points/d528407fe43b5df193f0194265a9746c.pts 03467517/expert_verified/points_label/d528407fe43b5df193f0194265a9746c.seg 03467517 +03636649/points/776e4b38023091002cd2160e449d45ae.pts 03636649/expert_verified/points_label/776e4b38023091002cd2160e449d45ae.seg 03636649 +04379243/points/91df49ec00f2c5ce73f1ca2ca101a20d.pts 04379243/expert_verified/points_label/91df49ec00f2c5ce73f1ca2ca101a20d.seg 04379243 +04379243/points/47f25d5b367326ceaaf15b62af6b513f.pts 04379243/expert_verified/points_label/47f25d5b367326ceaaf15b62af6b513f.seg 04379243 +04379243/points/f5d6579b3a1f5a879d2be74cfb51ade1.pts 04379243/expert_verified/points_label/f5d6579b3a1f5a879d2be74cfb51ade1.seg 04379243 +02691156/points/f6ea6663b48bf78261f1ef59130c405d.pts 02691156/expert_verified/points_label/f6ea6663b48bf78261f1ef59130c405d.seg 02691156 +03001627/points/63da17eda9d415b5319c5e90e9cc9126.pts 03001627/expert_verified/points_label/63da17eda9d415b5319c5e90e9cc9126.seg 03001627 +02691156/points/9fb60716f0f5a2b84408eb298433d643.pts 02691156/expert_verified/points_label/9fb60716f0f5a2b84408eb298433d643.seg 02691156 +02773838/points/5161d9adede671d6edc32c5c9ec9f827.pts 02773838/expert_verified/points_label/5161d9adede671d6edc32c5c9ec9f827.seg 02773838 +04379243/points/696beb1883be838cc955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/696beb1883be838cc955e5ed03ef3a2f.seg 04379243 +03001627/points/bc184c3cbe3349b19fb4103277a6b93.pts 03001627/expert_verified/points_label/bc184c3cbe3349b19fb4103277a6b93.seg 03001627 +03642806/points/28fbfd8b8c9c6f16e1e44e2fc05361d9.pts 03642806/expert_verified/points_label/28fbfd8b8c9c6f16e1e44e2fc05361d9.seg 03642806 +04379243/points/506e4e67efe1794c1dacbc3d67b5a11a.pts 04379243/expert_verified/points_label/506e4e67efe1794c1dacbc3d67b5a11a.seg 04379243 +02691156/points/a48676cfe44fd9bee40acb87a6be88b3.pts 02691156/expert_verified/points_label/a48676cfe44fd9bee40acb87a6be88b3.seg 02691156 +04379243/points/9e5926bfdc7f01749e65a3d2929a9516.pts 04379243/expert_verified/points_label/9e5926bfdc7f01749e65a3d2929a9516.seg 04379243 
+04379243/points/dc47d49db6ac670635d498476a30ff0e.pts 04379243/expert_verified/points_label/dc47d49db6ac670635d498476a30ff0e.seg 04379243 +04379243/points/33c6e3b21a67b750e78d7b497732dce1.pts 04379243/expert_verified/points_label/33c6e3b21a67b750e78d7b497732dce1.seg 04379243 +04379243/points/27295a6f585b7817febad4f49b26ec52.pts 04379243/expert_verified/points_label/27295a6f585b7817febad4f49b26ec52.seg 04379243 +03624134/points/6f8b660661269406504c6b6d62466c67.pts 03624134/expert_verified/points_label/6f8b660661269406504c6b6d62466c67.seg 03624134 +03642806/points/dbc61cbed5f7f2b33c1abb78f1519c49.pts 03642806/expert_verified/points_label/dbc61cbed5f7f2b33c1abb78f1519c49.seg 03642806 +03001627/points/374bec02e71fe06528b4c5ec471dc963.pts 03001627/expert_verified/points_label/374bec02e71fe06528b4c5ec471dc963.seg 03001627 +03001627/points/b41aaea5754adae0444b41d6d7f557fa.pts 03001627/expert_verified/points_label/b41aaea5754adae0444b41d6d7f557fa.seg 03001627 +03001627/points/7f4f73ad1b3f882ba14472becb07b261.pts 03001627/expert_verified/points_label/7f4f73ad1b3f882ba14472becb07b261.seg 03001627 +03001627/points/b80122c3a0543a7b7eaeab1f0c9120b7.pts 03001627/expert_verified/points_label/b80122c3a0543a7b7eaeab1f0c9120b7.seg 03001627 +04379243/points/2e4fbab46e264616d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/2e4fbab46e264616d93768e7b9b1eabf.seg 04379243 +03001627/points/4a12589099b05c51e13b3410f3683610.pts 03001627/expert_verified/points_label/4a12589099b05c51e13b3410f3683610.seg 03001627 +03001627/points/bc523df998d94c7223ac0bd64c9cb255.pts 03001627/expert_verified/points_label/bc523df998d94c7223ac0bd64c9cb255.seg 03001627 +02691156/points/218caa58819e10d1fe40308d822f996c.pts 02691156/expert_verified/points_label/218caa58819e10d1fe40308d822f996c.seg 02691156 +04379243/points/a5e951c9d7a9a93f8cbb8bac2032149c.pts 04379243/expert_verified/points_label/a5e951c9d7a9a93f8cbb8bac2032149c.seg 04379243 +03636649/points/f228f6cd86162beb659dda512294c744.pts 03636649/expert_verified/points_label/f228f6cd86162beb659dda512294c744.seg 03636649 +03467517/points/648a820e550bdfd093f0194265a9746c.pts 03467517/expert_verified/points_label/648a820e550bdfd093f0194265a9746c.seg 03467517 +03624134/points/8f61777bf6b57fedc13545c5b1a2e607.pts 03624134/expert_verified/points_label/8f61777bf6b57fedc13545c5b1a2e607.seg 03624134 +03001627/points/bb9efb4912a018b3c329e2758ab09ecb.pts 03001627/expert_verified/points_label/bb9efb4912a018b3c329e2758ab09ecb.seg 03001627 +03001627/points/fdac1f9c0b030841c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/fdac1f9c0b030841c8687ff9b0b4e4ac.seg 03001627 +02691156/points/8ac8c21b63ff535fca8607f540cc62ba.pts 02691156/expert_verified/points_label/8ac8c21b63ff535fca8607f540cc62ba.seg 02691156 +03467517/points/4e4d180e78d8b52a93f0194265a9746c.pts 03467517/expert_verified/points_label/4e4d180e78d8b52a93f0194265a9746c.seg 03467517 +03636649/points/7bc1b202ebf000625949e084b65603cf.pts 03636649/expert_verified/points_label/7bc1b202ebf000625949e084b65603cf.seg 03636649 +03001627/points/3c8362c1e57c30d7e6c5cd45aa112726.pts 03001627/expert_verified/points_label/3c8362c1e57c30d7e6c5cd45aa112726.seg 03001627 +03001627/points/5510d5af1ab5714b3c42e318f3affc.pts 03001627/expert_verified/points_label/5510d5af1ab5714b3c42e318f3affc.seg 03001627 +04379243/points/4d393b562df7cfad9a16b095d67f7209.pts 04379243/expert_verified/points_label/4d393b562df7cfad9a16b095d67f7209.seg 04379243 +03797390/points/e984fd7e97c2be347eaeab1f0c9120b7.pts 
03797390/expert_verified/points_label/e984fd7e97c2be347eaeab1f0c9120b7.seg 03797390 +03001627/points/483d22dbbee32ee54e5c7d89bdfc49a3.pts 03001627/expert_verified/points_label/483d22dbbee32ee54e5c7d89bdfc49a3.seg 03001627 +02691156/points/a5cd14be786fc8175e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/a5cd14be786fc8175e9e2656aff7dd5b.seg 02691156 +03636649/points/d4bbd93c0d85e77d7934a0d24a61231.pts 03636649/expert_verified/points_label/d4bbd93c0d85e77d7934a0d24a61231.seg 03636649 +03467517/points/7027bc171baae1d663e148e250c0340d.pts 03467517/expert_verified/points_label/7027bc171baae1d663e148e250c0340d.seg 03467517 +03636649/points/1a44dd6ee873d443da13974b3533fb59.pts 03636649/expert_verified/points_label/1a44dd6ee873d443da13974b3533fb59.seg 03636649 +04379243/points/2e3037a285fd8b5c1be2a853ec4f9e8.pts 04379243/expert_verified/points_label/2e3037a285fd8b5c1be2a853ec4f9e8.seg 04379243 +04379243/points/e3b585b15506fa7113f96345312df593.pts 04379243/expert_verified/points_label/e3b585b15506fa7113f96345312df593.seg 04379243 +02958343/points/ee1d28a50a2b71e129348d14ca881f7d.pts 02958343/expert_verified/points_label/ee1d28a50a2b71e129348d14ca881f7d.seg 02958343 +03001627/points/22af872ac796ed26ff8d7c1096fae070.pts 03001627/expert_verified/points_label/22af872ac796ed26ff8d7c1096fae070.seg 03001627 +03642806/points/9b4ab67eb448c49c11ced4a54f2e6229.pts 03642806/expert_verified/points_label/9b4ab67eb448c49c11ced4a54f2e6229.seg 03642806 +03624134/points/1640911b9dc0ef0da95c6095f89cd899.pts 03624134/expert_verified/points_label/1640911b9dc0ef0da95c6095f89cd899.seg 03624134 +03001627/points/f6810de4042cc5ce57bd4bc6eae9b341.pts 03001627/expert_verified/points_label/f6810de4042cc5ce57bd4bc6eae9b341.seg 03001627 +03001627/points/c46eb7460be602b6bf80985a99195eb8.pts 03001627/expert_verified/points_label/c46eb7460be602b6bf80985a99195eb8.seg 03001627 +03624134/points/debbbf239d59d8724662dc124dd336ed.pts 03624134/expert_verified/points_label/debbbf239d59d8724662dc124dd336ed.seg 03624134 +04379243/points/5b51e63726f21bb6a75d03186a0409e2.pts 04379243/expert_verified/points_label/5b51e63726f21bb6a75d03186a0409e2.seg 04379243 +02691156/points/b59a7cab8e95f6eaf3a7414a84b5637.pts 02691156/expert_verified/points_label/b59a7cab8e95f6eaf3a7414a84b5637.seg 02691156 +03001627/points/52c32b187590e8f3bba5aaac798c64af.pts 03001627/expert_verified/points_label/52c32b187590e8f3bba5aaac798c64af.seg 03001627 +03001627/points/1c173d970e21e9a8be95ff480950e9ef.pts 03001627/expert_verified/points_label/1c173d970e21e9a8be95ff480950e9ef.seg 03001627 +03624134/points/7238d0009faeacb5fd770de1635caa0.pts 03624134/expert_verified/points_label/7238d0009faeacb5fd770de1635caa0.seg 03624134 +04379243/points/cc554812025dc498e7ed5b5b11f935c9.pts 04379243/expert_verified/points_label/cc554812025dc498e7ed5b5b11f935c9.seg 04379243 +04379243/points/fff492e352c8cb336240c88cd4684446.pts 04379243/expert_verified/points_label/fff492e352c8cb336240c88cd4684446.seg 04379243 +03636649/points/e0a2948797cc33b2e19a0cc107ada7cd.pts 03636649/expert_verified/points_label/e0a2948797cc33b2e19a0cc107ada7cd.seg 03636649 +03636649/points/fe02f6594ed8b96ae85a3dc26b76b2ae.pts 03636649/expert_verified/points_label/fe02f6594ed8b96ae85a3dc26b76b2ae.seg 03636649 +04379243/points/d4a7a1dc0f1a51986f15d61c214769af.pts 04379243/expert_verified/points_label/d4a7a1dc0f1a51986f15d61c214769af.seg 04379243 +03624134/points/3dbda789bc59a5f99246ea0301684d80.pts 03624134/expert_verified/points_label/3dbda789bc59a5f99246ea0301684d80.seg 03624134 
+04379243/points/b82e068c2c18cd67b09f0ca9c143fdfd.pts 04379243/expert_verified/points_label/b82e068c2c18cd67b09f0ca9c143fdfd.seg 04379243
+03001627/points/b360f2264526521f1dee989d1177ef4e.pts 03001627/expert_verified/points_label/b360f2264526521f1dee989d1177ef4e.seg 03001627
+02691156/points/8ff8f3c845e7ae8443afdb9c81ff2967.pts 02691156/expert_verified/points_label/8ff8f3c845e7ae8443afdb9c81ff2967.seg 02691156
+03001627/points/ea87765cf9dbe2fe55f46d55537192b6.pts 03001627/expert_verified/points_label/ea87765cf9dbe2fe55f46d55537192b6.seg 03001627
+03001627/points/df23ca11080bb439676c272956dad3c2.pts 03001627/expert_verified/points_label/df23ca11080bb439676c272956dad3c2.seg 03001627
+03790512/points/a3dfeae5bced3533b37378f3c85478b4.pts 03790512/expert_verified/points_label/a3dfeae5bced3533b37378f3c85478b4.seg 03790512
+04379243/points/9af7a071bbd432baa5526f91aecc0c37.pts 04379243/expert_verified/points_label/9af7a071bbd432baa5526f91aecc0c37.seg 04379243
+03001627/points/a8b5f5b6bf0cb2d6876b399a99a15c0f.pts 03001627/expert_verified/points_label/a8b5f5b6bf0cb2d6876b399a99a15c0f.seg 03001627
+03001627/points/c7e590c0390e8d5debe67d9b32c3ddf8.pts 03001627/expert_verified/points_label/c7e590c0390e8d5debe67d9b32c3ddf8.seg 03001627
+03790512/points/4f30742005b7c20e883158c0007ed9ba.pts 03790512/expert_verified/points_label/4f30742005b7c20e883158c0007ed9ba.seg 03790512
+04379243/points/40b632472f8e69a7664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/40b632472f8e69a7664b3b9b23ddfcbc.seg 04379243
+03467517/points/d71c17b4d1ffa131f10a27cbb87f3a5.pts 03467517/expert_verified/points_label/d71c17b4d1ffa131f10a27cbb87f3a5.seg 03467517
+04379243/points/f563e9cd92a0dbe5a07b1c1d0ca9cf45.pts 04379243/expert_verified/points_label/f563e9cd92a0dbe5a07b1c1d0ca9cf45.seg 04379243
+03797390/points/1a97f3c83016abca21d0de04f408950f.pts 03797390/expert_verified/points_label/1a97f3c83016abca21d0de04f408950f.seg 03797390
+04379243/points/c3135e3b21b42e132449009b96f8a6ed.pts 04379243/expert_verified/points_label/c3135e3b21b42e132449009b96f8a6ed.seg 04379243
+03636649/points/89b168160388c29da996f5a90dae9cac.pts 03636649/expert_verified/points_label/89b168160388c29da996f5a90dae9cac.seg 03636649
+02958343/points/8bbbfdbec9251733ace5721ccacba16.pts 02958343/expert_verified/points_label/8bbbfdbec9251733ace5721ccacba16.seg 02958343
+04379243/points/db5a895ae7358c97b66213207f46bee7.pts 04379243/expert_verified/points_label/db5a895ae7358c97b66213207f46bee7.seg 04379243
+03001627/points/6a28919186eb55ecf69d0cf4fdc89b12.pts 03001627/expert_verified/points_label/6a28919186eb55ecf69d0cf4fdc89b12.seg 03001627
+04379243/points/e7169243daef074dc82dc2efb3363de1.pts 04379243/expert_verified/points_label/e7169243daef074dc82dc2efb3363de1.seg 04379243
+03467517/points/4ae5a491c3ffb473462c6cdd250c26bb.pts 03467517/expert_verified/points_label/4ae5a491c3ffb473462c6cdd250c26bb.seg 03467517
+04379243/points/e1a8e9e2059f4792fbb8cbddab1c2002.pts 04379243/expert_verified/points_label/e1a8e9e2059f4792fbb8cbddab1c2002.seg 04379243
+03467517/points/364f85832427992343820c03f9f59458.pts 03467517/expert_verified/points_label/364f85832427992343820c03f9f59458.seg 03467517
+02958343/points/4822076e48b366371f0d59cde6139796.pts 02958343/expert_verified/points_label/4822076e48b366371f0d59cde6139796.seg 02958343
+03636649/points/d34a10201a5448a253cf897b7fc1d12.pts 03636649/expert_verified/points_label/d34a10201a5448a253cf897b7fc1d12.seg 03636649
+03467517/points/77095861248c816693f0194265a9746c.pts 03467517/expert_verified/points_label/77095861248c816693f0194265a9746c.seg 03467517
+04379243/points/dacde6546ca2e07f66dc6ea1ac82d91f.pts 04379243/expert_verified/points_label/dacde6546ca2e07f66dc6ea1ac82d91f.seg 04379243
+03636649/points/670ad2964ad5a98c9f1a71e46bbde97c.pts 03636649/expert_verified/points_label/670ad2964ad5a98c9f1a71e46bbde97c.seg 03636649
+02691156/points/77c9fd0f0c6b0e9fca8607f540cc62ba.pts 02691156/expert_verified/points_label/77c9fd0f0c6b0e9fca8607f540cc62ba.seg 02691156
+03001627/points/5fc6b04623ae6a9963ed57e35c972b4b.pts 03001627/expert_verified/points_label/5fc6b04623ae6a9963ed57e35c972b4b.seg 03001627
+02958343/points/f18093ac0242d439f500cc506a763c18.pts 02958343/expert_verified/points_label/f18093ac0242d439f500cc506a763c18.seg 02958343
+03001627/points/2fed64c67552aa689c1db271ad9472a7.pts 03001627/expert_verified/points_label/2fed64c67552aa689c1db271ad9472a7.seg 03001627
+03001627/points/bf7e8e0dc4f4038cc2567be77cb7ab45.pts 03001627/expert_verified/points_label/bf7e8e0dc4f4038cc2567be77cb7ab45.seg 03001627
+04379243/points/690e073a4000c7ae540e292bd26f307a.pts 04379243/expert_verified/points_label/690e073a4000c7ae540e292bd26f307a.seg 04379243
+03467517/points/5fc56e6d220d775e381b7fbf79296afb.pts 03467517/expert_verified/points_label/5fc56e6d220d775e381b7fbf79296afb.seg 03467517
+04379243/points/8af3fd230ea7ac6518101790733ed6b2.pts 04379243/expert_verified/points_label/8af3fd230ea7ac6518101790733ed6b2.seg 04379243
+03636649/points/80436dff2a30721849655ac7c771b113.pts 03636649/expert_verified/points_label/80436dff2a30721849655ac7c771b113.seg 03636649
+03790512/points/b767982d38b5171e429f1c522640e6f0.pts 03790512/expert_verified/points_label/b767982d38b5171e429f1c522640e6f0.seg 03790512
+03001627/points/40e6fb27aeb9c9ab44f999802029a79a.pts 03001627/expert_verified/points_label/40e6fb27aeb9c9ab44f999802029a79a.seg 03001627
+04379243/points/59e1afdec89de9442b70eac6546e93fd.pts 04379243/expert_verified/points_label/59e1afdec89de9442b70eac6546e93fd.seg 04379243
+02691156/points/43d8125d940bb2ae850f318836ee7512.pts 02691156/expert_verified/points_label/43d8125d940bb2ae850f318836ee7512.seg 02691156
+02691156/points/cbc9d6ae9d22fcc57f3efc94c2d31dc5.pts 02691156/expert_verified/points_label/cbc9d6ae9d22fcc57f3efc94c2d31dc5.seg 02691156
+04379243/points/f585560965413925d706ecb3379aa341.pts 04379243/expert_verified/points_label/f585560965413925d706ecb3379aa341.seg 04379243
+04379243/points/adee49b8f5251efeaade78cbbf8fad3b.pts 04379243/expert_verified/points_label/adee49b8f5251efeaade78cbbf8fad3b.seg 04379243
+03261776/points/ccf84f2cbd3ebeb247ba1bc05b9a0f37.pts 03261776/expert_verified/points_label/ccf84f2cbd3ebeb247ba1bc05b9a0f37.seg 03261776
+03001627/points/2343e2c4fa69f33a2ff834514c92e8fd.pts 03001627/expert_verified/points_label/2343e2c4fa69f33a2ff834514c92e8fd.seg 03001627
+03636649/points/1d89da4ac1538ada9c949ae6274aa016.pts 03636649/expert_verified/points_label/1d89da4ac1538ada9c949ae6274aa016.seg 03636649
+03001627/points/51e14c516e45ec3b18ed59365c9648a7.pts 03001627/expert_verified/points_label/51e14c516e45ec3b18ed59365c9648a7.seg 03001627
+03001627/points/1e276a016b664e424d678187b8261d95.pts 03001627/expert_verified/points_label/1e276a016b664e424d678187b8261d95.seg 03001627
+03636649/points/4deef34d95367b58c0d95250e682f6ee.pts 03636649/expert_verified/points_label/4deef34d95367b58c0d95250e682f6ee.seg 03636649
+03001627/points/5d3eff6a1b9a119da011ccf7cbabf68e.pts 03001627/expert_verified/points_label/5d3eff6a1b9a119da011ccf7cbabf68e.seg 03001627
+04379243/points/9afaf5ab87a889f67acae9ce58893de5.pts 04379243/expert_verified/points_label/9afaf5ab87a889f67acae9ce58893de5.seg 04379243
+04379243/points/5431993203dfcf797ec12e029bc725db.pts 04379243/expert_verified/points_label/5431993203dfcf797ec12e029bc725db.seg 04379243
+03001627/points/6a01eed3a575987211e48e4bcdc4a2a3.pts 03001627/expert_verified/points_label/6a01eed3a575987211e48e4bcdc4a2a3.seg 03001627
+02958343/points/a8f2c3adc0671c15c64e95fc6a597455.pts 02958343/expert_verified/points_label/a8f2c3adc0671c15c64e95fc6a597455.seg 02958343
+04379243/points/f60960ae4dc8e293c8ce22a41ea48e48.pts 04379243/expert_verified/points_label/f60960ae4dc8e293c8ce22a41ea48e48.seg 04379243
+03624134/points/3a4f0118a57093cbf7c4ed45ce654123.pts 03624134/expert_verified/points_label/3a4f0118a57093cbf7c4ed45ce654123.seg 03624134
+03636649/points/52783aa89adf06f3250c527721570ba0.pts 03636649/expert_verified/points_label/52783aa89adf06f3250c527721570ba0.seg 03636649
+03001627/points/b13a4df698183bf9afb6676a5cd782b6.pts 03001627/expert_verified/points_label/b13a4df698183bf9afb6676a5cd782b6.seg 03001627
+03636649/points/26f725bb6578936cd247b9308cd5c441.pts 03636649/expert_verified/points_label/26f725bb6578936cd247b9308cd5c441.seg 03636649
+03001627/points/6df1ecffaa0abdbf327289c00b6dc9ca.pts 03001627/expert_verified/points_label/6df1ecffaa0abdbf327289c00b6dc9ca.seg 03001627
+04379243/points/3c475d9f0433a7eaad2650d014e970a5.pts 04379243/expert_verified/points_label/3c475d9f0433a7eaad2650d014e970a5.seg 04379243
+02958343/points/fee1c13922c07e8711b978ff9450f61b.pts 02958343/expert_verified/points_label/fee1c13922c07e8711b978ff9450f61b.seg 02958343
+04379243/points/6bc941dbd290c7f21acdac000802e11c.pts 04379243/expert_verified/points_label/6bc941dbd290c7f21acdac000802e11c.seg 04379243
+02958343/points/6333b9c777384ad14362be10a3fc8255.pts 02958343/expert_verified/points_label/6333b9c777384ad14362be10a3fc8255.seg 02958343
+03001627/points/9a35f15e924e19db637adadafee6f182.pts 03001627/expert_verified/points_label/9a35f15e924e19db637adadafee6f182.seg 03001627
+03001627/points/b0531a0d44fc22144224ee0743294f79.pts 03001627/expert_verified/points_label/b0531a0d44fc22144224ee0743294f79.seg 03001627
+03636649/points/913ff6452d0ea43c9d62807daf4a2134.pts 03636649/expert_verified/points_label/913ff6452d0ea43c9d62807daf4a2134.seg 03636649
+03467517/points/e45f323ce7ecab8393f0194265a9746c.pts 03467517/expert_verified/points_label/e45f323ce7ecab8393f0194265a9746c.seg 03467517
+02691156/points/aa2af754642256c08699933784576e73.pts 02691156/expert_verified/points_label/aa2af754642256c08699933784576e73.seg 02691156
+04379243/points/75b308ba45762ad499e8bf807e902261.pts 04379243/expert_verified/points_label/75b308ba45762ad499e8bf807e902261.seg 04379243
+03001627/points/3622d983fd6d7b98e3a73d090627e9ba.pts 03001627/expert_verified/points_label/3622d983fd6d7b98e3a73d090627e9ba.seg 03001627
+04225987/points/db4c8bf323465e4c537d393009a79347.pts 04225987/expert_verified/points_label/db4c8bf323465e4c537d393009a79347.seg 04225987
+04379243/points/132bfde1fabe9ab771a782a4379556c7.pts 04379243/expert_verified/points_label/132bfde1fabe9ab771a782a4379556c7.seg 04379243
+03001627/points/3dc8243b17bc790620768660cf080d12.pts 03001627/expert_verified/points_label/3dc8243b17bc790620768660cf080d12.seg 03001627
+04379243/points/ccb96ea5f047c97f278d386bfa54545.pts 04379243/expert_verified/points_label/ccb96ea5f047c97f278d386bfa54545.seg 04379243
+04379243/points/14ae5631e7dfa10430bbd4cddd04c77b.pts 04379243/expert_verified/points_label/14ae5631e7dfa10430bbd4cddd04c77b.seg 04379243
+04379243/points/78a81cbd2a5720d93a938fdd57fac3b4.pts 04379243/expert_verified/points_label/78a81cbd2a5720d93a938fdd57fac3b4.seg 04379243
+04379243/points/307bdd2a06137694a10ff7fd5e43a633.pts 04379243/expert_verified/points_label/307bdd2a06137694a10ff7fd5e43a633.seg 04379243
+03001627/points/f3573756e64259f2b29d280b4e59c527.pts 03001627/expert_verified/points_label/f3573756e64259f2b29d280b4e59c527.seg 03001627
+04379243/points/1815c6431b06dfb4f008d8a3590fb522.pts 04379243/expert_verified/points_label/1815c6431b06dfb4f008d8a3590fb522.seg 04379243
+04379243/points/7fda06ada2d897baadab4c26397edfab.pts 04379243/expert_verified/points_label/7fda06ada2d897baadab4c26397edfab.seg 04379243
+04379243/points/86b48365b2bd587e61830bc1b4d6c5ea.pts 04379243/expert_verified/points_label/86b48365b2bd587e61830bc1b4d6c5ea.seg 04379243
+03948459/points/6aae44dd39fb9476f059c10da31213ea.pts 03948459/expert_verified/points_label/6aae44dd39fb9476f059c10da31213ea.seg 03948459
+04379243/points/424c77a1f39ac41620dd2dd4d7d7656c.pts 04379243/expert_verified/points_label/424c77a1f39ac41620dd2dd4d7d7656c.seg 04379243
+03001627/points/8778c23fd21bdebf8a80d99ff4e76c20.pts 03001627/expert_verified/points_label/8778c23fd21bdebf8a80d99ff4e76c20.seg 03001627
+03001627/points/257deb231ce652169f2349486c570dd4.pts 03001627/expert_verified/points_label/257deb231ce652169f2349486c570dd4.seg 03001627
+03642806/points/e5559cd005d5c4942a7b0c74c5f22fc4.pts 03642806/expert_verified/points_label/e5559cd005d5c4942a7b0c74c5f22fc4.seg 03642806
+03001627/points/986e49bd8314d7424addf6a5f8726274.pts 03001627/expert_verified/points_label/986e49bd8314d7424addf6a5f8726274.seg 03001627
+04379243/points/b3fc5247186936f1dcfcef693e7ec696.pts 04379243/expert_verified/points_label/b3fc5247186936f1dcfcef693e7ec696.seg 04379243
+02691156/points/da9d111e1175d318bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/da9d111e1175d318bbf3143b1cb6076a.seg 02691156
+04379243/points/54b26954e478b1a34ea8d5f5f27d7ce3.pts 04379243/expert_verified/points_label/54b26954e478b1a34ea8d5f5f27d7ce3.seg 04379243
+03001627/points/2d44744a7ea0bf724b3c42e318f3affc.pts 03001627/expert_verified/points_label/2d44744a7ea0bf724b3c42e318f3affc.seg 03001627
+04379243/points/9dd63148e5b0a4f79eaa55bb236fb6e1.pts 04379243/expert_verified/points_label/9dd63148e5b0a4f79eaa55bb236fb6e1.seg 04379243
+04379243/points/6ab7ebf9b94176456f1e07a56c129dfc.pts 04379243/expert_verified/points_label/6ab7ebf9b94176456f1e07a56c129dfc.seg 04379243
+03001627/points/6aaa9bd6e835eb0f9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/6aaa9bd6e835eb0f9b9f2eb77f5e247e.seg 03001627
+03636649/points/34020466b4342812218c9f1216abefd.pts 03636649/expert_verified/points_label/34020466b4342812218c9f1216abefd.seg 03636649
+03001627/points/df7735e2bce09a511f98c0761af40e04.pts 03001627/expert_verified/points_label/df7735e2bce09a511f98c0761af40e04.seg 03001627
+03636649/points/1d963d5c54613202b0aa15078ea6f391.pts 03636649/expert_verified/points_label/1d963d5c54613202b0aa15078ea6f391.seg 03636649
+03636649/points/5a9e0dd068e2436bd7ebac63aa51083.pts 03636649/expert_verified/points_label/5a9e0dd068e2436bd7ebac63aa51083.seg 03636649
+03001627/points/b1f50d8d41a8c53b6197fd390b16d14d.pts 03001627/expert_verified/points_label/b1f50d8d41a8c53b6197fd390b16d14d.seg 03001627
+03001627/points/285931af369b12c2ccd42a2d6eea63ed.pts 03001627/expert_verified/points_label/285931af369b12c2ccd42a2d6eea63ed.seg 03001627
+03636649/points/69429d8ffb5009a82060e7309fc3fc6.pts 03636649/expert_verified/points_label/69429d8ffb5009a82060e7309fc3fc6.seg 03636649
+04379243/points/63b53646b3562677d395837145ded71.pts 04379243/expert_verified/points_label/63b53646b3562677d395837145ded71.seg 04379243
+03001627/points/ee5ee3f6759aabacf2f43e6f841bd32b.pts 03001627/expert_verified/points_label/ee5ee3f6759aabacf2f43e6f841bd32b.seg 03001627
+02691156/points/bdfbf1c555dacd9d325212819caa597d.pts 02691156/expert_verified/points_label/bdfbf1c555dacd9d325212819caa597d.seg 02691156
+04379243/points/9f321f05a7808719ab610b0c94236463.pts 04379243/expert_verified/points_label/9f321f05a7808719ab610b0c94236463.seg 04379243
+03624134/points/fb1f385d487d13d7aa0079d6fb0f853c.pts 03624134/expert_verified/points_label/fb1f385d487d13d7aa0079d6fb0f853c.seg 03624134
+04379243/points/109738784a0a6129a02c88fe01f2b9c1.pts 04379243/expert_verified/points_label/109738784a0a6129a02c88fe01f2b9c1.seg 04379243
+03467517/points/65e3bdc247b3ce3d4de904d1abbce016.pts 03467517/expert_verified/points_label/65e3bdc247b3ce3d4de904d1abbce016.seg 03467517
+02691156/points/94ce3a5ad2576e73a5cac89017eae8d1.pts 02691156/expert_verified/points_label/94ce3a5ad2576e73a5cac89017eae8d1.seg 02691156
+03001627/points/80fab0c55a60abb7dafb0be26f6b45d5.pts 03001627/expert_verified/points_label/80fab0c55a60abb7dafb0be26f6b45d5.seg 03001627
+04379243/points/e6ee101d3cb13bdd16a2b5862518c93.pts 04379243/expert_verified/points_label/e6ee101d3cb13bdd16a2b5862518c93.seg 04379243
+04379243/points/6f2ffe8c014a6a458af30108ea9ccb6c.pts 04379243/expert_verified/points_label/6f2ffe8c014a6a458af30108ea9ccb6c.seg 04379243
+02958343/points/504793ed2da6cf7eba3e2415e22cd45c.pts 02958343/expert_verified/points_label/504793ed2da6cf7eba3e2415e22cd45c.seg 02958343
+03467517/points/9e26dcbac33f056c343b0b12983b9982.pts 03467517/expert_verified/points_label/9e26dcbac33f056c343b0b12983b9982.seg 03467517
+03467517/points/a92cd0b5d559075daa9518d76daaca23.pts 03467517/expert_verified/points_label/a92cd0b5d559075daa9518d76daaca23.seg 03467517
+03636649/points/b6989c99bba1226539b3360f500ac52a.pts 03636649/expert_verified/points_label/b6989c99bba1226539b3360f500ac52a.seg 03636649
+03624134/points/cc38f97557029b2a2b5fd8277662be97.pts 03624134/expert_verified/points_label/cc38f97557029b2a2b5fd8277662be97.seg 03624134
+03790512/points/41cc9674e700c3fdb37378f3c85478b4.pts 03790512/expert_verified/points_label/41cc9674e700c3fdb37378f3c85478b4.seg 03790512
+03001627/points/56b171b1f1521d27291d12adef12641b.pts 03001627/expert_verified/points_label/56b171b1f1521d27291d12adef12641b.seg 03001627
+03636649/points/ddc2d39dac6e84506c5b8009db95f66f.pts 03636649/expert_verified/points_label/ddc2d39dac6e84506c5b8009db95f66f.seg 03636649
+02691156/points/edc185566c1df89c35fc197bbabcd5bd.pts 02691156/expert_verified/points_label/edc185566c1df89c35fc197bbabcd5bd.seg 02691156
+04379243/points/fb5e8a6361262c26acf7920879052e93.pts 04379243/expert_verified/points_label/fb5e8a6361262c26acf7920879052e93.seg 04379243
+04379243/points/8862cddf90fddb3119fb4103277a6b93.pts 04379243/expert_verified/points_label/8862cddf90fddb3119fb4103277a6b93.seg 04379243
+02691156/points/d5a94c9f09d238c4c3a35cee92bb95b.pts 02691156/expert_verified/points_label/d5a94c9f09d238c4c3a35cee92bb95b.seg 02691156
+03636649/points/1682d4404196cf127588e2ca59b15f8.pts 03636649/expert_verified/points_label/1682d4404196cf127588e2ca59b15f8.seg 03636649
+04379243/points/2f33abdfe147813e44949d7685cb63ea.pts 04379243/expert_verified/points_label/2f33abdfe147813e44949d7685cb63ea.seg 04379243
+03001627/points/e158f7ba6828db5c654ea6737b0d3597.pts 03001627/expert_verified/points_label/e158f7ba6828db5c654ea6737b0d3597.seg 03001627
+04379243/points/564474f25a4400c5dc20930e6fc85682.pts 04379243/expert_verified/points_label/564474f25a4400c5dc20930e6fc85682.seg 04379243
+04379243/points/eb379b2b95e76502e258d1c3e7302e7b.pts 04379243/expert_verified/points_label/eb379b2b95e76502e258d1c3e7302e7b.seg 04379243
+03001627/points/3a1b54325b3565e72ca4b544d68c52.pts 03001627/expert_verified/points_label/3a1b54325b3565e72ca4b544d68c52.seg 03001627
+04225987/points/393ca71bd734f3071082f2ea630bf69e.pts 04225987/expert_verified/points_label/393ca71bd734f3071082f2ea630bf69e.seg 04225987
+03636649/points/bd1cbcb990375022b45fed2806c331ab.pts 03636649/expert_verified/points_label/bd1cbcb990375022b45fed2806c331ab.seg 03636649
+03001627/points/6a9dce6566cd61652b339ec555ba3bfc.pts 03001627/expert_verified/points_label/6a9dce6566cd61652b339ec555ba3bfc.seg 03001627
+02691156/points/94379090010cd6bb874c9ce092a813ef.pts 02691156/expert_verified/points_label/94379090010cd6bb874c9ce092a813ef.seg 02691156
+02773838/points/d3bd250ca3cb8e29976855a35549333.pts 02773838/expert_verified/points_label/d3bd250ca3cb8e29976855a35549333.seg 02773838
+03001627/points/36cb782fbc164ac312591a3ac05fadf1.pts 03001627/expert_verified/points_label/36cb782fbc164ac312591a3ac05fadf1.seg 03001627
+03642806/points/2211a40cc77a085362c091e763f81d3.pts 03642806/expert_verified/points_label/2211a40cc77a085362c091e763f81d3.seg 03642806
+04379243/points/5cbd726c3ffd8fc49b458816be7a3962.pts 04379243/expert_verified/points_label/5cbd726c3ffd8fc49b458816be7a3962.seg 04379243
+02691156/points/72aee7d0e998a68aca8607f540cc62ba.pts 02691156/expert_verified/points_label/72aee7d0e998a68aca8607f540cc62ba.seg 02691156
+04379243/points/1c3310f4c05ce1f6a192483aa282f8e5.pts 04379243/expert_verified/points_label/1c3310f4c05ce1f6a192483aa282f8e5.seg 04379243
+04379243/points/4ced745f960f7439b91767277279ac70.pts 04379243/expert_verified/points_label/4ced745f960f7439b91767277279ac70.seg 04379243
+03642806/points/8d70fb6adc63e21eb7e0383b9609fa5.pts 03642806/expert_verified/points_label/8d70fb6adc63e21eb7e0383b9609fa5.seg 03642806
+03001627/points/2bd6800d64c01d677721fafb59ea099.pts 03001627/expert_verified/points_label/2bd6800d64c01d677721fafb59ea099.seg 03001627
+03467517/points/1abe78447898821e93f0194265a9746c.pts 03467517/expert_verified/points_label/1abe78447898821e93f0194265a9746c.seg 03467517
+02691156/points/9bf3c126d5918c41f5c7319b71bdce6e.pts 02691156/expert_verified/points_label/9bf3c126d5918c41f5c7319b71bdce6e.seg 02691156
+03642806/points/1312ea502b4e9b51701c1f58e22b85e8.pts 03642806/expert_verified/points_label/1312ea502b4e9b51701c1f58e22b85e8.seg 03642806
+04379243/points/a9cc8112fb8c4ed5dfd21203bf8b4b46.pts 04379243/expert_verified/points_label/a9cc8112fb8c4ed5dfd21203bf8b4b46.seg 04379243
+03642806/points/62b25a5e3119b8409023147b38c03c9f.pts 03642806/expert_verified/points_label/62b25a5e3119b8409023147b38c03c9f.seg 03642806
+04379243/points/a4fcd8afe8b6de585beaf00da5b709c2.pts 04379243/expert_verified/points_label/a4fcd8afe8b6de585beaf00da5b709c2.seg 04379243
+03636649/points/907fd296708ae71dd5fab5deb286066.pts 03636649/expert_verified/points_label/907fd296708ae71dd5fab5deb286066.seg 03636649
+04379243/points/c5ae96124c15c734e6c5cd45aa112726.pts 04379243/expert_verified/points_label/c5ae96124c15c734e6c5cd45aa112726.seg 04379243
+03642806/points/ef6d43add46d0cae4e07b09c086cc5c4.pts 03642806/expert_verified/points_label/ef6d43add46d0cae4e07b09c086cc5c4.seg 03642806
+04379243/points/8d07df2bf706cda58c5591114064d173.pts 04379243/expert_verified/points_label/8d07df2bf706cda58c5591114064d173.seg 04379243
+02958343/points/5316fab78a6732f0428df271ebc70bc0.pts 02958343/expert_verified/points_label/5316fab78a6732f0428df271ebc70bc0.seg 02958343
+03467517/points/7946e354e342f560c5a468097fc791e4.pts 03467517/expert_verified/points_label/7946e354e342f560c5a468097fc791e4.seg 03467517
+03467517/points/d3684d071dcb6bffd3193ed047bef161.pts 03467517/expert_verified/points_label/d3684d071dcb6bffd3193ed047bef161.seg 03467517
+04379243/points/33b081062b2195e71771ee930e861b13.pts 04379243/expert_verified/points_label/33b081062b2195e71771ee930e861b13.seg 04379243
+02958343/points/511962626501e4abf500cc506a763c18.pts 02958343/expert_verified/points_label/511962626501e4abf500cc506a763c18.seg 02958343
+03797390/points/c82b9f1b98f044fc15cf6e5ad80f2da.pts 03797390/expert_verified/points_label/c82b9f1b98f044fc15cf6e5ad80f2da.seg 03797390
+04379243/points/49f625856c796254d249abd69334079c.pts 04379243/expert_verified/points_label/49f625856c796254d249abd69334079c.seg 04379243
+03001627/points/ca4900c42b8016ef8397cd720acaa508.pts 03001627/expert_verified/points_label/ca4900c42b8016ef8397cd720acaa508.seg 03001627
+03636649/points/31a15957bd4f32f87eedf2c7d21f7cfa.pts 03636649/expert_verified/points_label/31a15957bd4f32f87eedf2c7d21f7cfa.seg 03636649
+03797390/points/928a383f79698c3fb6d9bc28c8d8a2c4.pts 03797390/expert_verified/points_label/928a383f79698c3fb6d9bc28c8d8a2c4.seg 03797390
+04379243/points/17e5a64889ca085fa5526f91aecc0c37.pts 04379243/expert_verified/points_label/17e5a64889ca085fa5526f91aecc0c37.seg 04379243
+02958343/points/cbe2dc469c47bb80425b2c354eccabaf.pts 02958343/expert_verified/points_label/cbe2dc469c47bb80425b2c354eccabaf.seg 02958343
+03001627/points/19c8189116dd7cd3e95c611687989498.pts 03001627/expert_verified/points_label/19c8189116dd7cd3e95c611687989498.seg 03001627
+03636649/points/7f518fe982aae1b5940c8a2639c8747.pts 03636649/expert_verified/points_label/7f518fe982aae1b5940c8a2639c8747.seg 03636649
+03636649/points/7b1fef0071908d4bd93768e7b9b1eabf.pts 03636649/expert_verified/points_label/7b1fef0071908d4bd93768e7b9b1eabf.seg 03636649
+03001627/points/475e2c8f7a2c1bbd9acf9a86c283d1a2.pts 03001627/expert_verified/points_label/475e2c8f7a2c1bbd9acf9a86c283d1a2.seg 03001627
+03467517/points/5c805aca7aa8bdd3ac61a2f8346a8f.pts 03467517/expert_verified/points_label/5c805aca7aa8bdd3ac61a2f8346a8f.seg 03467517
+03790512/points/8032295bd3851d75468bac13e007a6e9.pts 03790512/expert_verified/points_label/8032295bd3851d75468bac13e007a6e9.seg 03790512
+02691156/points/3e0561d70c7fd4f51c6e4e20f2b76086.pts 02691156/expert_verified/points_label/3e0561d70c7fd4f51c6e4e20f2b76086.seg 02691156
+02691156/points/e5610bbacaf098508b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/e5610bbacaf098508b96ae1a0a8b84ec.seg 02691156
+03467517/points/97e8ee1b6df404bd57700c05b1862d8.pts 03467517/expert_verified/points_label/97e8ee1b6df404bd57700c05b1862d8.seg 03467517
+03636649/points/981b55897cee64403c8d0fdfb1cc2535.pts 03636649/expert_verified/points_label/981b55897cee64403c8d0fdfb1cc2535.seg 03636649
+04379243/points/204d9ecc196990ebe8479ad2eabcbab4.pts 04379243/expert_verified/points_label/204d9ecc196990ebe8479ad2eabcbab4.seg 04379243
+04379243/points/9d039675f4d51869f3edd695842c6d58.pts 04379243/expert_verified/points_label/9d039675f4d51869f3edd695842c6d58.seg 04379243
+03467517/points/cb5b2e3f499e4fdecc571cd3cf8f17a1.pts 03467517/expert_verified/points_label/cb5b2e3f499e4fdecc571cd3cf8f17a1.seg 03467517
+04379243/points/5243b5491a4f8a16a2b5862518c93.pts 04379243/expert_verified/points_label/5243b5491a4f8a16a2b5862518c93.seg 04379243
+04379243/points/efbf0d75648b7c7d5792b99b8245d225.pts 04379243/expert_verified/points_label/efbf0d75648b7c7d5792b99b8245d225.seg 04379243
+03001627/points/c8265e04c94bcb5a1346e336f65f96f6.pts 03001627/expert_verified/points_label/c8265e04c94bcb5a1346e336f65f96f6.seg 03001627
+02958343/points/94cfcfb74e246f938acb0ff76f4aec7d.pts 02958343/expert_verified/points_label/94cfcfb74e246f938acb0ff76f4aec7d.seg 02958343
+03467517/points/a0b6f040538d26e3ac61a2f8346a8f.pts 03467517/expert_verified/points_label/a0b6f040538d26e3ac61a2f8346a8f.seg 03467517
+03001627/points/70f1f85d47c970bb78dd615a59de5f05.pts 03001627/expert_verified/points_label/70f1f85d47c970bb78dd615a59de5f05.seg 03001627
+04379243/points/f4976e80b8533bcf85518f8659f21d56.pts 04379243/expert_verified/points_label/f4976e80b8533bcf85518f8659f21d56.seg 04379243
+03636649/points/9fdaafde365beafc37f7ce56c66316ea.pts 03636649/expert_verified/points_label/9fdaafde365beafc37f7ce56c66316ea.seg 03636649
+03467517/points/22033c6d7e5a90f193f0194265a9746c.pts 03467517/expert_verified/points_label/22033c6d7e5a90f193f0194265a9746c.seg 03467517
+02691156/points/c1b5dc92221bcdad5fc84bf2b9ef981.pts 02691156/expert_verified/points_label/c1b5dc92221bcdad5fc84bf2b9ef981.seg 02691156
+04379243/points/79d0985603f7ff3be6c5cd45aa112726.pts 04379243/expert_verified/points_label/79d0985603f7ff3be6c5cd45aa112726.seg 04379243
+03467517/points/5d6c1516b83dec8663e148e250c0340d.pts 03467517/expert_verified/points_label/5d6c1516b83dec8663e148e250c0340d.seg 03467517
+04379243/points/79c5df613523a462d42b9650f19dd425.pts 04379243/expert_verified/points_label/79c5df613523a462d42b9650f19dd425.seg 04379243
+03001627/points/f19e8da9d8f369c531e63f1270e2b445.pts 03001627/expert_verified/points_label/f19e8da9d8f369c531e63f1270e2b445.seg 03001627
+03001627/points/9a711bb7070ae88de948e3d64826c640.pts 03001627/expert_verified/points_label/9a711bb7070ae88de948e3d64826c640.seg 03001627
+03467517/points/2adbf6c3f8f2d9ca7fe36b1f0a632ed8.pts 03467517/expert_verified/points_label/2adbf6c3f8f2d9ca7fe36b1f0a632ed8.seg 03467517
+03001627/points/837ba605a4ab4a4f19fb4103277a6b93.pts 03001627/expert_verified/points_label/837ba605a4ab4a4f19fb4103277a6b93.seg 03001627
+03001627/points/807f08096308af5e28c0cecb7de2397a.pts 03001627/expert_verified/points_label/807f08096308af5e28c0cecb7de2397a.seg 03001627
+03467517/points/275c4f98ef07f2b393f0194265a9746c.pts 03467517/expert_verified/points_label/275c4f98ef07f2b393f0194265a9746c.seg 03467517
+04379243/points/57afaabf994feb305512673aa47c7e3d.pts 04379243/expert_verified/points_label/57afaabf994feb305512673aa47c7e3d.seg 04379243
+03001627/points/d9156f5552178de2713decb1a0563b12.pts 03001627/expert_verified/points_label/d9156f5552178de2713decb1a0563b12.seg 03001627
+03948459/points/fe62130ce6fcd9b77754fed890b42399.pts 03948459/expert_verified/points_label/fe62130ce6fcd9b77754fed890b42399.seg 03948459
+03261776/points/1757fe64e76a9630fc176230c2f2d294.pts 03261776/expert_verified/points_label/1757fe64e76a9630fc176230c2f2d294.seg 03261776
+03790512/points/3fd1bff496b369f71765540024eb9fef.pts 03790512/expert_verified/points_label/3fd1bff496b369f71765540024eb9fef.seg 03790512
+02958343/points/a6d494af391a97686436916a86a90ed7.pts 02958343/expert_verified/points_label/a6d494af391a97686436916a86a90ed7.seg 02958343
+04099429/points/59389aac7b1ea9b09b28f5f9cf8893b5.pts 04099429/expert_verified/points_label/59389aac7b1ea9b09b28f5f9cf8893b5.seg 04099429
+04379243/points/c399ed276ed35cb9a6ce08f0d82ba063.pts 04379243/expert_verified/points_label/c399ed276ed35cb9a6ce08f0d82ba063.seg 04379243
+03624134/points/e4f610f36ba3c6f69246ea0301684d80.pts 03624134/expert_verified/points_label/e4f610f36ba3c6f69246ea0301684d80.seg 03624134
+03636649/points/90b0f9a1ac2e54ecbc7f58784fda27b5.pts 03636649/expert_verified/points_label/90b0f9a1ac2e54ecbc7f58784fda27b5.seg 03636649
+03636649/points/e5e9ff118631c2a3ee088de33038f12a.pts 03636649/expert_verified/points_label/e5e9ff118631c2a3ee088de33038f12a.seg 03636649
+04099429/points/4936716925b1cd6428eba1f0b7744e9.pts 04099429/expert_verified/points_label/4936716925b1cd6428eba1f0b7744e9.seg 04099429
+04379243/points/6e446bb5adf14b0b6121178eafd002fd.pts 04379243/expert_verified/points_label/6e446bb5adf14b0b6121178eafd002fd.seg 04379243
+03001627/points/7ea38c936513f5df3772b104757a4809.pts 03001627/expert_verified/points_label/7ea38c936513f5df3772b104757a4809.seg 03001627
+04379243/points/23d68e01b77089ae76ad4f5e7c7020eb.pts 04379243/expert_verified/points_label/23d68e01b77089ae76ad4f5e7c7020eb.seg 04379243
+03636649/points/4d6bced89943df73b4edf02c99e16daa.pts 03636649/expert_verified/points_label/4d6bced89943df73b4edf02c99e16daa.seg 03636649
+04379243/points/3459eec8eb56fa312bac236fe109e385.pts 04379243/expert_verified/points_label/3459eec8eb56fa312bac236fe109e385.seg 04379243
+03261776/points/1a5e2a7cddc8e46aa681aea7976a4565.pts 03261776/expert_verified/points_label/1a5e2a7cddc8e46aa681aea7976a4565.seg 03261776
+03001627/points/ed0d65c68a1fa5c485e2f8b1d3a373fe.pts 03001627/expert_verified/points_label/ed0d65c68a1fa5c485e2f8b1d3a373fe.seg 03001627
+03636649/points/7b005e23eae2768eb08c032bedc99529.pts 03636649/expert_verified/points_label/7b005e23eae2768eb08c032bedc99529.seg 03636649
+04379243/points/3f2e9c14ab1d26a0ebead06af665220.pts 04379243/expert_verified/points_label/3f2e9c14ab1d26a0ebead06af665220.seg 04379243
+03001627/points/383ab6330284af461fc4ae93e00c18e5.pts 03001627/expert_verified/points_label/383ab6330284af461fc4ae93e00c18e5.seg 03001627
+02691156/points/fc7387d630c84bb9c863ab010b80d9ed.pts 02691156/expert_verified/points_label/fc7387d630c84bb9c863ab010b80d9ed.seg 02691156
+04225987/points/344e9402d06bd94031145076011658c5.pts 04225987/expert_verified/points_label/344e9402d06bd94031145076011658c5.seg 04225987
+04379243/points/745a2b060d0f692bf4b6538438a0b930.pts 04379243/expert_verified/points_label/745a2b060d0f692bf4b6538438a0b930.seg 04379243
+04379243/points/928ea87878a7bbe26cf876b69450cd4e.pts 04379243/expert_verified/points_label/928ea87878a7bbe26cf876b69450cd4e.seg 04379243
+03001627/points/5fe56a4a9d5508c3b2373df00b89e5d.pts 03001627/expert_verified/points_label/5fe56a4a9d5508c3b2373df00b89e5d.seg 03001627
+02691156/points/6a75658fb8242b9c590874dcd9dc8481.pts 02691156/expert_verified/points_label/6a75658fb8242b9c590874dcd9dc8481.seg 02691156
+03948459/points/f377665c5b17d0ce61b636d79e46a7e9.pts 03948459/expert_verified/points_label/f377665c5b17d0ce61b636d79e46a7e9.seg 03948459
+03642806/points/ab21f75b97d6b1054f22ce0a3592d5.pts 03642806/expert_verified/points_label/ab21f75b97d6b1054f22ce0a3592d5.seg 03642806
+04379243/points/a2baf45f001e118e2c79f7f31759bfa7.pts 04379243/expert_verified/points_label/a2baf45f001e118e2c79f7f31759bfa7.seg 04379243
+02691156/points/19ff8fce1658f864ca8607f540cc62ba.pts 02691156/expert_verified/points_label/19ff8fce1658f864ca8607f540cc62ba.seg 02691156
+04379243/points/8bb3a7e1cb24fe6febad4f49b26ec52.pts 04379243/expert_verified/points_label/8bb3a7e1cb24fe6febad4f49b26ec52.seg 04379243
+04379243/points/dbc5a4d1dc3a6e8271a782a4379556c7.pts 04379243/expert_verified/points_label/dbc5a4d1dc3a6e8271a782a4379556c7.seg 04379243
+03001627/points/e6c11fed9469141ace8fba09dd640742.pts 03001627/expert_verified/points_label/e6c11fed9469141ace8fba09dd640742.seg 03001627
+03797390/points/f99e19b8c4a729353deb88581ea8417a.pts 03797390/expert_verified/points_label/f99e19b8c4a729353deb88581ea8417a.seg 03797390
+03001627/points/d454f99b99248bf337c99625b0c170be.pts 03001627/expert_verified/points_label/d454f99b99248bf337c99625b0c170be.seg 03001627
+03636649/points/7c23362b39f318cbb18d6f615cb18bdd.pts 03636649/expert_verified/points_label/7c23362b39f318cbb18d6f615cb18bdd.seg 03636649
+03001627/points/d8e2e2a923b372731cf97e154cc62f43.pts 03001627/expert_verified/points_label/d8e2e2a923b372731cf97e154cc62f43.seg 03001627
+03642806/points/621882a4afd2a126369873c1090720a1.pts 03642806/expert_verified/points_label/621882a4afd2a126369873c1090720a1.seg 03642806
+04379243/points/d5d1e750bb492dd5391e4d6c585a697a.pts 04379243/expert_verified/points_label/d5d1e750bb492dd5391e4d6c585a697a.seg 04379243
+03467517/points/42f3172b8770d2fd2200c35bfa7099ee.pts 03467517/expert_verified/points_label/42f3172b8770d2fd2200c35bfa7099ee.seg 03467517
+03624134/points/a2288d5f3a44233bc40c6b891c4913bd.pts 03624134/expert_verified/points_label/a2288d5f3a44233bc40c6b891c4913bd.seg 03624134
+02691156/points/90612205109d7458e84aab2e1d454e3c.pts 02691156/expert_verified/points_label/90612205109d7458e84aab2e1d454e3c.seg 02691156
+03001627/points/2c03bcb2a133ce28bb6caad47eee6580.pts 03001627/expert_verified/points_label/2c03bcb2a133ce28bb6caad47eee6580.seg 03001627
+03001627/points/f23d3a85baabd7ae32d9baba75737e72.pts 03001627/expert_verified/points_label/f23d3a85baabd7ae32d9baba75737e72.seg 03001627
+04379243/points/90be5de0faef91ef3f7e27638e63d848.pts 04379243/expert_verified/points_label/90be5de0faef91ef3f7e27638e63d848.seg 04379243
+02691156/points/d5f01e2aa54bbf28ca8607f540cc62ba.pts 02691156/expert_verified/points_label/d5f01e2aa54bbf28ca8607f540cc62ba.seg 02691156
+02691156/points/4f0bf26c62bb7c8b7e1c97634acf0214.pts 02691156/expert_verified/points_label/4f0bf26c62bb7c8b7e1c97634acf0214.seg 02691156
+03001627/points/4246c8c293c56ea34b3c42e318f3affc.pts 03001627/expert_verified/points_label/4246c8c293c56ea34b3c42e318f3affc.seg 03001627
+04379243/points/9b42cb91ccead6d42f6d10c5d1d56320.pts 04379243/expert_verified/points_label/9b42cb91ccead6d42f6d10c5d1d56320.seg 04379243
+03001627/points/c67b7b62e529295dfc30525e763ef5eb.pts 03001627/expert_verified/points_label/c67b7b62e529295dfc30525e763ef5eb.seg 03001627
+04379243/points/394c63a5658ef759b515d1675be6b5d3.pts 04379243/expert_verified/points_label/394c63a5658ef759b515d1675be6b5d3.seg 04379243
+03636649/points/13ba3fbe8fbc53f3ef3a2c64cef919d0.pts 03636649/expert_verified/points_label/13ba3fbe8fbc53f3ef3a2c64cef919d0.seg 03636649
+04379243/points/cb860d60db8f3d18febad4f49b26ec52.pts 04379243/expert_verified/points_label/cb860d60db8f3d18febad4f49b26ec52.seg 04379243
+04379243/points/657aad273d665f5dd9823f45c4411583.pts 04379243/expert_verified/points_label/657aad273d665f5dd9823f45c4411583.seg 04379243
+03001627/points/64fcd1ba0df5d54d79b3e1be3524f72f.pts 03001627/expert_verified/points_label/64fcd1ba0df5d54d79b3e1be3524f72f.seg 03001627
+03642806/points/8489cb783d249651b674654e7bbe623d.pts 03642806/expert_verified/points_label/8489cb783d249651b674654e7bbe623d.seg 03642806
+03467517/points/3824a2336972d144a24eeca91f583600.pts 03467517/expert_verified/points_label/3824a2336972d144a24eeca91f583600.seg 03467517
+03797390/points/99eaa69cf6fe8811dec712af445786fe.pts 03797390/expert_verified/points_label/99eaa69cf6fe8811dec712af445786fe.seg 03797390
+03001627/points/e31d71ed32273fede42ac999db581f5e.pts 03001627/expert_verified/points_label/e31d71ed32273fede42ac999db581f5e.seg 03001627
+03001627/points/9a42cff883cbd358106f706dac6c58f0.pts 03001627/expert_verified/points_label/9a42cff883cbd358106f706dac6c58f0.seg 03001627
+04379243/points/b515a107aa3a3fd0e3dff0d5ebb43915.pts 04379243/expert_verified/points_label/b515a107aa3a3fd0e3dff0d5ebb43915.seg 04379243
+03001627/points/bd6a8b133fa4d269491d6cee03fef2a9.pts 03001627/expert_verified/points_label/bd6a8b133fa4d269491d6cee03fef2a9.seg 03001627
+03001627/points/51c8f249e778e84a5bae8923b29985ad.pts 03001627/expert_verified/points_label/51c8f249e778e84a5bae8923b29985ad.seg 03001627
+02691156/points/f12eefbbefabe566ca8607f540cc62ba.pts 02691156/expert_verified/points_label/f12eefbbefabe566ca8607f540cc62ba.seg 02691156
+02691156/points/ad6e93a1db3e1da5977e4bb19a62128e.pts 02691156/expert_verified/points_label/ad6e93a1db3e1da5977e4bb19a62128e.seg 02691156
+03001627/points/efa83c67ce47bfca304edcf7c4314468.pts 03001627/expert_verified/points_label/efa83c67ce47bfca304edcf7c4314468.seg 03001627
+03624134/points/d6e9e4e07bafca0fa37f3fc191551700.pts 03624134/expert_verified/points_label/d6e9e4e07bafca0fa37f3fc191551700.seg 03624134
+03642806/points/e083105e9c2a28bb0c3a03d0a1f182f.pts 03642806/expert_verified/points_label/e083105e9c2a28bb0c3a03d0a1f182f.seg 03642806
+03001627/points/d2992fd5e6715bad3bbf93f83cbaf271.pts 03001627/expert_verified/points_label/d2992fd5e6715bad3bbf93f83cbaf271.seg 03001627
+04379243/points/4a27cb9384782ce33e95c55cb020b7e6.pts 04379243/expert_verified/points_label/4a27cb9384782ce33e95c55cb020b7e6.seg 04379243
+04379243/points/cf046edeff204b81cdf7280ff8af6720.pts 04379243/expert_verified/points_label/cf046edeff204b81cdf7280ff8af6720.seg 04379243
+03001627/points/6534f04a1c349a3c8c6540fe6bc16d6f.pts 03001627/expert_verified/points_label/6534f04a1c349a3c8c6540fe6bc16d6f.seg 03001627
+03636649/points/1917888a2b6901091735ea0e092a805a.pts 03636649/expert_verified/points_label/1917888a2b6901091735ea0e092a805a.seg 03636649
+03636649/points/b37e07ac31fa4f311735ea0e092a805a.pts 03636649/expert_verified/points_label/b37e07ac31fa4f311735ea0e092a805a.seg 03636649
+03636649/points/2f6f1fe66631572c6c5b8009db95f66f.pts 03636649/expert_verified/points_label/2f6f1fe66631572c6c5b8009db95f66f.seg 03636649
+03467517/points/feab270427cee00a24eeca91f583600.pts 03467517/expert_verified/points_label/feab270427cee00a24eeca91f583600.seg 03467517
+02691156/points/e30e25fe047ce1ea10b08ceced9a0113.pts 02691156/expert_verified/points_label/e30e25fe047ce1ea10b08ceced9a0113.seg 02691156
+03636649/points/b2347fe81bd2db6a4b3c42e318f3affc.pts 03636649/expert_verified/points_label/b2347fe81bd2db6a4b3c42e318f3affc.seg 03636649
+03001627/points/bb7755090f984ba85dd1bba5b1310523.pts 03001627/expert_verified/points_label/bb7755090f984ba85dd1bba5b1310523.seg 03001627
+02691156/points/bc7ead8b45952ab8822054a0a020bf4a.pts 02691156/expert_verified/points_label/bc7ead8b45952ab8822054a0a020bf4a.seg 02691156
+02691156/points/5a1d4af1f417d28566cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/5a1d4af1f417d28566cf1b4a8fc3914e.seg 02691156
+02691156/points/a6cbada42d1a30d0f5c7319b71bdce6e.pts 02691156/expert_verified/points_label/a6cbada42d1a30d0f5c7319b71bdce6e.seg 02691156
+02691156/points/b785b39d10c33b5de9f07d25f575b2d4.pts 02691156/expert_verified/points_label/b785b39d10c33b5de9f07d25f575b2d4.seg 02691156
+03001627/points/2df8d2af1bc4b9972056b4bd5d870b47.pts 03001627/expert_verified/points_label/2df8d2af1bc4b9972056b4bd5d870b47.seg 03001627
+03797390/points/d46b98f63a017578ea456f4bbbc96af9.pts 03797390/expert_verified/points_label/d46b98f63a017578ea456f4bbbc96af9.seg 03797390
+04379243/points/1adf96850963550f19fb4103277a6b93.pts 04379243/expert_verified/points_label/1adf96850963550f19fb4103277a6b93.seg 04379243
+03001627/points/cb7a4324fdfa690e96dd43aa0ec847c9.pts 03001627/expert_verified/points_label/cb7a4324fdfa690e96dd43aa0ec847c9.seg 03001627
+03624134/points/c19088b4c32c0f1d22b38218e60be05.pts 03624134/expert_verified/points_label/c19088b4c32c0f1d22b38218e60be05.seg 03624134
+04379243/points/1acf7b0939f3eea2eafdf94e5032b200.pts 04379243/expert_verified/points_label/1acf7b0939f3eea2eafdf94e5032b200.seg 04379243
+03467517/points/d50d06b159363b1693f0194265a9746c.pts 03467517/expert_verified/points_label/d50d06b159363b1693f0194265a9746c.seg 03467517
+02691156/points/dacb447d7820e7f7ca8607f540cc62ba.pts 02691156/expert_verified/points_label/dacb447d7820e7f7ca8607f540cc62ba.seg 02691156
+04379243/points/c3a9dc47c5bf10aac3bd24f986301745.pts 04379243/expert_verified/points_label/c3a9dc47c5bf10aac3bd24f986301745.seg 04379243
+04379243/points/4791914b3bcaf57efebad4f49b26ec52.pts 04379243/expert_verified/points_label/4791914b3bcaf57efebad4f49b26ec52.seg 04379243
+03001627/points/bf3f14225e8f899db62f9fb4b7f0626.pts 03001627/expert_verified/points_label/bf3f14225e8f899db62f9fb4b7f0626.seg 03001627
+04379243/points/4f5c111a89b3fd27aa29e9f0529e8ef7.pts 04379243/expert_verified/points_label/4f5c111a89b3fd27aa29e9f0529e8ef7.seg 04379243
+03001627/points/6af8d7bfa508b8d23759750e8db40476.pts 03001627/expert_verified/points_label/6af8d7bfa508b8d23759750e8db40476.seg 03001627
+02691156/points/427030abcc0f11a8947bbeb9022263b8.pts 02691156/expert_verified/points_label/427030abcc0f11a8947bbeb9022263b8.seg 02691156
+03642806/points/367fbaea8743ec1cc98452c8fce6b43.pts 03642806/expert_verified/points_label/367fbaea8743ec1cc98452c8fce6b43.seg 03642806
+04379243/points/419412b927d11c7d8312881285c04cb3.pts 04379243/expert_verified/points_label/419412b927d11c7d8312881285c04cb3.seg 04379243
+03001627/points/56cc047440e7c999a23949c21eddef76.pts 03001627/expert_verified/points_label/56cc047440e7c999a23949c21eddef76.seg 03001627
+03790512/points/fdb6223c286cb653cc9e7530f9d8e186.pts 03790512/expert_verified/points_label/fdb6223c286cb653cc9e7530f9d8e186.seg 03790512
+03636649/points/6b2a590446ad5794b10e111f2d30684d.pts 03636649/expert_verified/points_label/6b2a590446ad5794b10e111f2d30684d.seg 03636649
+03001627/points/a3ce9ba74ab50352e6fe3612af521500.pts 03001627/expert_verified/points_label/a3ce9ba74ab50352e6fe3612af521500.seg 03001627
+02958343/points/9986dd19b2c459152470de2774d6099.pts 02958343/expert_verified/points_label/9986dd19b2c459152470de2774d6099.seg 02958343
+03642806/points/b806daf849a5dba289c212008d2a390e.pts 03642806/expert_verified/points_label/b806daf849a5dba289c212008d2a390e.seg 03642806
+04379243/points/2eb503dde3cc027d86c701087a194026.pts 04379243/expert_verified/points_label/2eb503dde3cc027d86c701087a194026.seg 04379243
+03001627/points/c4a4710012ee39bd19f4b416b31c46e0.pts 03001627/expert_verified/points_label/c4a4710012ee39bd19f4b416b31c46e0.seg 03001627
+02958343/points/bd8654fbca233e41ddb8f37b1865d989.pts 02958343/expert_verified/points_label/bd8654fbca233e41ddb8f37b1865d989.seg 02958343
+03001627/points/6fd485a2345c3dd69233bf560301e53.pts 03001627/expert_verified/points_label/6fd485a2345c3dd69233bf560301e53.seg 03001627
+02691156/points/aebc4c46b3cb7c3bca8607f540cc62ba.pts 02691156/expert_verified/points_label/aebc4c46b3cb7c3bca8607f540cc62ba.seg 02691156
+03001627/points/9343df9a7ed6cbba1923501fcdd899bb.pts 03001627/expert_verified/points_label/9343df9a7ed6cbba1923501fcdd899bb.seg 03001627
+04379243/points/7fadae39394c5622c3bd24f986301745.pts 04379243/expert_verified/points_label/7fadae39394c5622c3bd24f986301745.seg 04379243
+03001627/points/d619fd50c4d0fb46dea83bbf303af433.pts 03001627/expert_verified/points_label/d619fd50c4d0fb46dea83bbf303af433.seg 03001627
+04379243/points/ef02c88a34b3888a1b1a00a31bfed97b.pts 04379243/expert_verified/points_label/ef02c88a34b3888a1b1a00a31bfed97b.seg 04379243
+03467517/points/71d0016078dea05a94ca7929d4ba6d2d.pts 03467517/expert_verified/points_label/71d0016078dea05a94ca7929d4ba6d2d.seg 03467517
+03001627/points/5623d0ec9efedbc9d4da89766e80607a.pts 03001627/expert_verified/points_label/5623d0ec9efedbc9d4da89766e80607a.seg 03001627
+04379243/points/21486e6d0bd896ad5cca18918d24f6cd.pts 04379243/expert_verified/points_label/21486e6d0bd896ad5cca18918d24f6cd.seg 04379243
+03636649/points/978df83c1cee012729a60d6ab40898d.pts 03636649/expert_verified/points_label/978df83c1cee012729a60d6ab40898d.seg 03636649
+02691156/points/350d12f5290908c7f446f92b52bbd82a.pts 02691156/expert_verified/points_label/350d12f5290908c7f446f92b52bbd82a.seg 02691156
+03636649/points/86d7a728dc35d634f800b597bc1c1eb5.pts 03636649/expert_verified/points_label/86d7a728dc35d634f800b597bc1c1eb5.seg 03636649
+03001627/points/3b4292989394ba62f51f77a6d7299806.pts 03001627/expert_verified/points_label/3b4292989394ba62f51f77a6d7299806.seg 03001627
+03001627/points/f5f18fccf9e16800dbd185de408ea209.pts 03001627/expert_verified/points_label/f5f18fccf9e16800dbd185de408ea209.seg 03001627
+04379243/points/4d873bf1a658dcd523eb3ad3d378722a.pts 04379243/expert_verified/points_label/4d873bf1a658dcd523eb3ad3d378722a.seg 04379243
+03001627/points/a3e4639ff201f69b22a3043dcd383f68.pts 03001627/expert_verified/points_label/a3e4639ff201f69b22a3043dcd383f68.seg 03001627
+04379243/points/8d247c6f6aaf805a2530bfb25087f2b0.pts 04379243/expert_verified/points_label/8d247c6f6aaf805a2530bfb25087f2b0.seg 04379243
+03467517/points/511fc5ccf4f1c857a24eeca91f583600.pts 03467517/expert_verified/points_label/511fc5ccf4f1c857a24eeca91f583600.seg 03467517
+02691156/points/4635326bc4fdc3e9297cd7e2ef7dfa80.pts 02691156/expert_verified/points_label/4635326bc4fdc3e9297cd7e2ef7dfa80.seg 02691156
+03001627/points/525776b59266140381dff5c2e57ad46e.pts 03001627/expert_verified/points_label/525776b59266140381dff5c2e57ad46e.seg 03001627
+03001627/points/f1d6552ca66b2e37713decb1a0563b12.pts 03001627/expert_verified/points_label/f1d6552ca66b2e37713decb1a0563b12.seg 03001627
+04379243/points/40ff8ae39ad13d014a873bbe35452b88.pts 04379243/expert_verified/points_label/40ff8ae39ad13d014a873bbe35452b88.seg 04379243
+02691156/points/59f258b7aa7c1f7aa7d0c1e4eb8db7dc.pts 02691156/expert_verified/points_label/59f258b7aa7c1f7aa7d0c1e4eb8db7dc.seg 02691156
+04379243/points/63aa14915f59ed8671a782a4379556c7.pts 04379243/expert_verified/points_label/63aa14915f59ed8671a782a4379556c7.seg 04379243
+02691156/points/e16f9cc7dedcacdb9b0435532743fd43.pts 02691156/expert_verified/points_label/e16f9cc7dedcacdb9b0435532743fd43.seg 02691156
+04379243/points/c5b83c681c085f2195493ccf8f26ab2c.pts 04379243/expert_verified/points_label/c5b83c681c085f2195493ccf8f26ab2c.seg 04379243
+03001627/points/b2ba1569509cdb439451566a8c6563ed.pts 03001627/expert_verified/points_label/b2ba1569509cdb439451566a8c6563ed.seg 03001627
+02691156/points/265f5348ab2320b2148672750a1a335.pts 02691156/expert_verified/points_label/265f5348ab2320b2148672750a1a335.seg 02691156
+03001627/points/47da08d9c7cd7e104b3c42e318f3affc.pts 03001627/expert_verified/points_label/47da08d9c7cd7e104b3c42e318f3affc.seg 03001627
+03001627/points/458356b9c5a8d7bd7cc86734cb2f5062.pts 03001627/expert_verified/points_label/458356b9c5a8d7bd7cc86734cb2f5062.seg 03001627
+02691156/points/d20e3ed9b3430672bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/d20e3ed9b3430672bbf3143b1cb6076a.seg 02691156
+04379243/points/c45e6ceae72c7a97be8908669c476d49.pts 04379243/expert_verified/points_label/c45e6ceae72c7a97be8908669c476d49.seg 04379243
+03001627/points/d9bbd1a1eaf6d2259d3ea1c6b57a0095.pts 03001627/expert_verified/points_label/d9bbd1a1eaf6d2259d3ea1c6b57a0095.seg 03001627
+02958343/points/8242b114695b68286f522b2bb8ded829.pts 02958343/expert_verified/points_label/8242b114695b68286f522b2bb8ded829.seg 02958343
+03001627/points/e4b40369894a16ce6821a1e68ba5ebab.pts 03001627/expert_verified/points_label/e4b40369894a16ce6821a1e68ba5ebab.seg 03001627
+03636649/points/dfe800d8d8642e9647bc3701b998a7d5.pts 03636649/expert_verified/points_label/dfe800d8d8642e9647bc3701b998a7d5.seg 03636649
+04379243/points/bdf7606e8d493149664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/bdf7606e8d493149664b3b9b23ddfcbc.seg 04379243
+03001627/points/6015aaa9ef170d9bfdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/6015aaa9ef170d9bfdef1c01cbd4ae0c.seg 03001627
+03624134/points/df7a65224f295122ed9c5b25fef60d04.pts 03624134/expert_verified/points_label/df7a65224f295122ed9c5b25fef60d04.seg 03624134
+03467517/points/df959f68bb22e402a24eeca91f583600.pts 03467517/expert_verified/points_label/df959f68bb22e402a24eeca91f583600.seg 03467517
+04379243/points/69604fc24b7976d69ccce4c6d5bb195f.pts 04379243/expert_verified/points_label/69604fc24b7976d69ccce4c6d5bb195f.seg 04379243
+04379243/points/23aca164c7b2e2d4ad8af6714b643432.pts 04379243/expert_verified/points_label/23aca164c7b2e2d4ad8af6714b643432.seg 04379243
+03636649/points/e37796d40348fa5fd8013bb984303089.pts 03636649/expert_verified/points_label/e37796d40348fa5fd8013bb984303089.seg 03636649
+04379243/points/8cb6a2e9ba365c94593ebeeedbff73b.pts 04379243/expert_verified/points_label/8cb6a2e9ba365c94593ebeeedbff73b.seg 04379243
+03001627/points/d6f2d44c693d2e857062f2d72cde5c95.pts 03001627/expert_verified/points_label/d6f2d44c693d2e857062f2d72cde5c95.seg 03001627
+03948459/points/ed29dd43ad28f042d1987c07c912c6e1.pts 03948459/expert_verified/points_label/ed29dd43ad28f042d1987c07c912c6e1.seg 03948459
+03001627/points/ca01fd0de2534323c594a0e804f37c1a.pts 03001627/expert_verified/points_label/ca01fd0de2534323c594a0e804f37c1a.seg 03001627
+03636649/points/e7b719516449701362525a4d857f099d.pts 03636649/expert_verified/points_label/e7b719516449701362525a4d857f099d.seg 03636649
+02691156/points/bd48d0beb5d1acf1d2106c9042f1bde9.pts 02691156/expert_verified/points_label/bd48d0beb5d1acf1d2106c9042f1bde9.seg 02691156
+03636649/points/7cb828eb3b8e424b1e88064118b89a3e.pts 03636649/expert_verified/points_label/7cb828eb3b8e424b1e88064118b89a3e.seg 03636649
+03001627/points/fdd21f7f2ca9f0bcbdcbca499b446e89.pts 03001627/expert_verified/points_label/fdd21f7f2ca9f0bcbdcbca499b446e89.seg 03001627
+03636649/points/d779977c2417752b815c6de5374a8dd2.pts 03636649/expert_verified/points_label/d779977c2417752b815c6de5374a8dd2.seg 03636649
+02691156/points/f3e2df468c15795872517bb0a6b4d3ef.pts 02691156/expert_verified/points_label/f3e2df468c15795872517bb0a6b4d3ef.seg 02691156
+04379243/points/e3cc0b06be2c972cab610b0c94236463.pts 04379243/expert_verified/points_label/e3cc0b06be2c972cab610b0c94236463.seg 04379243
+03261776/points/ca1c1c9aba8f4491a656de49935d2359.pts 03261776/expert_verified/points_label/ca1c1c9aba8f4491a656de49935d2359.seg 03261776
+03001627/points/c535629f9661293dc16ef5c633c71b56.pts 03001627/expert_verified/points_label/c535629f9661293dc16ef5c633c71b56.seg 03001627
+03636649/points/699fcda4f4e9166ec5eb7aae719027b2.pts 03636649/expert_verified/points_label/699fcda4f4e9166ec5eb7aae719027b2.seg 03636649
+03001627/points/8a5d60067de905336c183a120a388982.pts 03001627/expert_verified/points_label/8a5d60067de905336c183a120a388982.seg 03001627
+02691156/points/4ad92be763c2ded8fca1f1143bb6bc17.pts 02691156/expert_verified/points_label/4ad92be763c2ded8fca1f1143bb6bc17.seg 02691156
+04379243/points/14d6b4b09dfc54e9d679a95896f75103.pts 04379243/expert_verified/points_label/14d6b4b09dfc54e9d679a95896f75103.seg 04379243
+02691156/points/5e9129782c45b26992e39b8eae3e6b15.pts 02691156/expert_verified/points_label/5e9129782c45b26992e39b8eae3e6b15.seg 02691156
+02691156/points/2aec6e6096e640add00d52e62bf14ee9.pts 02691156/expert_verified/points_label/2aec6e6096e640add00d52e62bf14ee9.seg 02691156
+03642806/points/7b4260884a1dfd76b080af510dd640b.pts 03642806/expert_verified/points_label/7b4260884a1dfd76b080af510dd640b.seg 03642806
+03636649/points/3a0edfd418e020b97f32712aef0efc5a.pts 03636649/expert_verified/points_label/3a0edfd418e020b97f32712aef0efc5a.seg 03636649
+03467517/points/1c374a198daaddc493f0194265a9746c.pts 03467517/expert_verified/points_label/1c374a198daaddc493f0194265a9746c.seg 03467517
+04379243/points/9d90a58677e619f94b8710a3469971b1.pts 04379243/expert_verified/points_label/9d90a58677e619f94b8710a3469971b1.seg 04379243
+02691156/points/26f8a11864fd6bf7b68211fcc7956ac6.pts 02691156/expert_verified/points_label/26f8a11864fd6bf7b68211fcc7956ac6.seg 02691156
+02773838/points/f5108ede5ca11f041f6736765dee4fa9.pts 02773838/expert_verified/points_label/f5108ede5ca11f041f6736765dee4fa9.seg 02773838
+03001627/points/41ce60d5443c203eb31c248b8665b2e7.pts 03001627/expert_verified/points_label/41ce60d5443c203eb31c248b8665b2e7.seg 03001627
+03797390/points/a637500654ca8d16c97cfc3e8a6b1d16.pts 03797390/expert_verified/points_label/a637500654ca8d16c97cfc3e8a6b1d16.seg 03797390
+03001627/points/9ee4b9c97bcf4b3715dec43ae6a12831.pts 03001627/expert_verified/points_label/9ee4b9c97bcf4b3715dec43ae6a12831.seg 03001627
+03001627/points/e2dbad7996e7e13430c589758b4b5646.pts 03001627/expert_verified/points_label/e2dbad7996e7e13430c589758b4b5646.seg 03001627
+03001627/points/ec9f1fc13f2e4ae2c3bd24f986301745.pts 03001627/expert_verified/points_label/ec9f1fc13f2e4ae2c3bd24f986301745.seg 03001627
+03624134/points/172b9a77462dcdeaed90ead9558ee6cb.pts 03624134/expert_verified/points_label/172b9a77462dcdeaed90ead9558ee6cb.seg 03624134
+04379243/points/713a4be770bb19b9586b2526565371c0.pts 04379243/expert_verified/points_label/713a4be770bb19b9586b2526565371c0.seg 04379243
+04379243/points/f2e6820ca69d9b7719fb4103277a6b93.pts 04379243/expert_verified/points_label/f2e6820ca69d9b7719fb4103277a6b93.seg 04379243
+03001627/points/11a06e6f68b1d99c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/11a06e6f68b1d99c8687ff9b0b4e4ac.seg 03001627
+04379243/points/cfd7e354a5ae982aa0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/cfd7e354a5ae982aa0ab1d82ef09f78f.seg 04379243
+03797390/points/8012f52dd0a4d2f718a93a45bf780820.pts 03797390/expert_verified/points_label/8012f52dd0a4d2f718a93a45bf780820.seg 03797390
+03636649/points/57c1bc69df779d87bbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/57c1bc69df779d87bbc7a6acbd8f058b.seg 03636649
+03948459/points/664579680dc09267e1f2a1daf140ac9f.pts 03948459/expert_verified/points_label/664579680dc09267e1f2a1daf140ac9f.seg 03948459
+03001627/points/ca032d3b6dcbe1cea3056fa1e8da3997.pts 03001627/expert_verified/points_label/ca032d3b6dcbe1cea3056fa1e8da3997.seg 03001627
+02691156/points/4a837740b388aa45d8ff6111270336a9.pts 02691156/expert_verified/points_label/4a837740b388aa45d8ff6111270336a9.seg 02691156
+04099429/points/64803bab9799d0e698d2d2b2ae2563b0.pts 04099429/expert_verified/points_label/64803bab9799d0e698d2d2b2ae2563b0.seg 04099429
+04379243/points/c2c36909e461e10adaaaeef365d8f6e5.pts 04379243/expert_verified/points_label/c2c36909e461e10adaaaeef365d8f6e5.seg 04379243
+04379243/points/bc842e548e68a3cbb48513409ae7c51d.pts 04379243/expert_verified/points_label/bc842e548e68a3cbb48513409ae7c51d.seg 04379243
+03467517/points/4709e55a82a63f64d57700c05b1862d8.pts 03467517/expert_verified/points_label/4709e55a82a63f64d57700c05b1862d8.seg 03467517
+04379243/points/dc6f030d9ee566a5dcfcef693e7ec696.pts 04379243/expert_verified/points_label/dc6f030d9ee566a5dcfcef693e7ec696.seg 04379243
+03001627/points/8be8093e99b94bd9cf320c31965db5a1.pts 03001627/expert_verified/points_label/8be8093e99b94bd9cf320c31965db5a1.seg 03001627
+02958343/points/a0a1b0377d72e86bab3dd76bf33b0f5e.pts 02958343/expert_verified/points_label/a0a1b0377d72e86bab3dd76bf33b0f5e.seg 02958343
+03001627/points/efc684ff4dc6ff49ccd42a2d6eea63ed.pts 03001627/expert_verified/points_label/efc684ff4dc6ff49ccd42a2d6eea63ed.seg 03001627
+03001627/points/ff2223a085d32243696b74614952b2d0.pts 03001627/expert_verified/points_label/ff2223a085d32243696b74614952b2d0.seg 03001627
+02954340/points/8b2951e32e0906bb5f6cb4951755315c.pts 02954340/expert_verified/points_label/8b2951e32e0906bb5f6cb4951755315c.seg 02954340
+04379243/points/82b69c9b72a5159ce76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/82b69c9b72a5159ce76bc197b3a3ffc0.seg 04379243
+03642806/points/5b5247b13d5b21bdad2954b86711abbd.pts 03642806/expert_verified/points_label/5b5247b13d5b21bdad2954b86711abbd.seg 03642806
+03636649/points/44e442591f82cd4cab0ac374f450cdc.pts 03636649/expert_verified/points_label/44e442591f82cd4cab0ac374f450cdc.seg 03636649
+03001627/points/2a1184b04dd8f30e3e92f39ce48d644.pts 03001627/expert_verified/points_label/2a1184b04dd8f30e3e92f39ce48d644.seg 03001627
+03636649/points/bc49fe3559e18fcb7d910d51d878f708.pts 03636649/expert_verified/points_label/bc49fe3559e18fcb7d910d51d878f708.seg 03636649
+03624134/points/c50af8af50613e822bf26da672b84220.pts 03624134/expert_verified/points_label/c50af8af50613e822bf26da672b84220.seg 03624134
+04225987/points/c0280aaad5473e8398c63cb68f11df34.pts 04225987/expert_verified/points_label/c0280aaad5473e8398c63cb68f11df34.seg 04225987
+03636649/points/5849d1a237cb493c659dda512294c744.pts 03636649/expert_verified/points_label/5849d1a237cb493c659dda512294c744.seg 03636649
+02958343/points/fcd90d547fdeb629f200a72c9245aee7.pts 02958343/expert_verified/points_label/fcd90d547fdeb629f200a72c9245aee7.seg 02958343
+03001627/points/34898c36e711fbde713decb1a0563b12.pts 03001627/expert_verified/points_label/34898c36e711fbde713decb1a0563b12.seg 03001627
+02691156/points/af696fc30a96a0c8bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/af696fc30a96a0c8bc0909d98a1ff2b4.seg 02691156
+04379243/points/f28e030e715b9d3e318462aca9e62b6b.pts 04379243/expert_verified/points_label/f28e030e715b9d3e318462aca9e62b6b.seg 04379243
+02691156/points/3c7e4628a9ea201bbf3143b1cb6076a.pts 02691156/expert_verified/points_label/3c7e4628a9ea201bbf3143b1cb6076a.seg 02691156
+03636649/points/f092117adb1e9254d1cbf3e52b9b6237.pts 03636649/expert_verified/points_label/f092117adb1e9254d1cbf3e52b9b6237.seg 03636649
+04379243/points/7dd881a26eea656d193afeeca14e3baa.pts 04379243/expert_verified/points_label/7dd881a26eea656d193afeeca14e3baa.seg 04379243
+03001627/points/79a3115a6f96eef7c151419181ef256.pts 03001627/expert_verified/points_label/79a3115a6f96eef7c151419181ef256.seg 03001627
+04379243/points/fc51355d4d03ff4ae6c5cd45aa112726.pts 04379243/expert_verified/points_label/fc51355d4d03ff4ae6c5cd45aa112726.seg 04379243
+04379243/points/34121f5cc12135148c1cf3f7d7f0373.pts 04379243/expert_verified/points_label/34121f5cc12135148c1cf3f7d7f0373.seg 04379243
+03624134/points/d5167211e757e79f012465c621a63e3.pts 03624134/expert_verified/points_label/d5167211e757e79f012465c621a63e3.seg 03624134
+04379243/points/5b375eacdbe49cfaaa539cd22945e538.pts 04379243/expert_verified/points_label/5b375eacdbe49cfaaa539cd22945e538.seg 04379243
+02691156/points/d3d788c1fb35227619ba010ddb4974fe.pts 02691156/expert_verified/points_label/d3d788c1fb35227619ba010ddb4974fe.seg 02691156
+02691156/points/f26ea1a00455f44fb88e2a19106395c2.pts 02691156/expert_verified/points_label/f26ea1a00455f44fb88e2a19106395c2.seg 02691156
+03001627/points/798a46965d9e0edfcea003eff0268278.pts 03001627/expert_verified/points_label/798a46965d9e0edfcea003eff0268278.seg 03001627
+02691156/points/3069d990d52051eb3a34c2907e8f3f1f.pts 02691156/expert_verified/points_label/3069d990d52051eb3a34c2907e8f3f1f.seg 02691156
+02691156/points/8c42e3042a4beaa7d5c40787c7bb7824.pts 02691156/expert_verified/points_label/8c42e3042a4beaa7d5c40787c7bb7824.seg 02691156
+04379243/points/45c5ee611c73b90a509330ce00eb0b20.pts 04379243/expert_verified/points_label/45c5ee611c73b90a509330ce00eb0b20.seg 04379243
+03001627/points/22ada577361ed0374b3c42e318f3affc.pts 03001627/expert_verified/points_label/22ada577361ed0374b3c42e318f3affc.seg 03001627
+04379243/points/b6ad7be371729438dcfcef693e7ec696.pts 04379243/expert_verified/points_label/b6ad7be371729438dcfcef693e7ec696.seg 04379243
+03636649/points/4c266f2b866c59e761fef32872c6fa53.pts 03636649/expert_verified/points_label/4c266f2b866c59e761fef32872c6fa53.seg 03636649
+04379243/points/812dd06fc99f174e9f2349486c570dd4.pts 04379243/expert_verified/points_label/812dd06fc99f174e9f2349486c570dd4.seg 04379243
+02691156/points/36a5bd4ca6a0b191532d23702363f9a5.pts 02691156/expert_verified/points_label/36a5bd4ca6a0b191532d23702363f9a5.seg 02691156
+03001627/points/be0890a6a0f3fcf841f91bc9e1dece3b.pts 03001627/expert_verified/points_label/be0890a6a0f3fcf841f91bc9e1dece3b.seg 03001627
+03642806/points/6008f256f3beafd9988abef1fd117e7.pts 03642806/expert_verified/points_label/6008f256f3beafd9988abef1fd117e7.seg 03642806
+03001627/points/490941bf4a532b62492d9da2668ec34c.pts 03001627/expert_verified/points_label/490941bf4a532b62492d9da2668ec34c.seg 03001627
+03636649/points/94940283714fdff6244ba644cf33cb2e.pts 03636649/expert_verified/points_label/94940283714fdff6244ba644cf33cb2e.seg 03636649
+03642806/points/6227e7dd1a391e8d54f22ce0a3592d5.pts 03642806/expert_verified/points_label/6227e7dd1a391e8d54f22ce0a3592d5.seg 03642806
+02691156/points/b2ceeee3c5b75962ac4f72bf08dc79a6.pts 02691156/expert_verified/points_label/b2ceeee3c5b75962ac4f72bf08dc79a6.seg 02691156
+03642806/points/55a05b33f34e7211f71cb38553f14917.pts 03642806/expert_verified/points_label/55a05b33f34e7211f71cb38553f14917.seg 03642806
+02773838/points/74c548ef3ca7b1987515e7bb7dba4019.pts 02773838/expert_verified/points_label/74c548ef3ca7b1987515e7bb7dba4019.seg 02773838
+03467517/points/defcf80fcef4b51b3f431ca2c1260d62.pts 03467517/expert_verified/points_label/defcf80fcef4b51b3f431ca2c1260d62.seg 03467517
+04379243/points/eaea1cf98b61abd043383304411cc9ec.pts 04379243/expert_verified/points_label/eaea1cf98b61abd043383304411cc9ec.seg 04379243
+03001627/points/7f6858bd9d4af9df97316612e1a4343a.pts 03001627/expert_verified/points_label/7f6858bd9d4af9df97316612e1a4343a.seg 03001627
+03001627/points/3c27660aacbcf99886327adaa986dff.pts 03001627/expert_verified/points_label/3c27660aacbcf99886327adaa986dff.seg 03001627
+04379243/points/229d510bace435811572ee5ddf1b55b.pts 04379243/expert_verified/points_label/229d510bace435811572ee5ddf1b55b.seg 04379243
+03636649/points/83c0ad378b5802b73d39d8012919dd25.pts 03636649/expert_verified/points_label/83c0ad378b5802b73d39d8012919dd25.seg 03636649
+02691156/points/f009f3112625ee00b8cf782e8c539948.pts 02691156/expert_verified/points_label/f009f3112625ee00b8cf782e8c539948.seg 02691156
+02691156/points/f13827d156628467b4cdad9a5bf52dd5.pts 02691156/expert_verified/points_label/f13827d156628467b4cdad9a5bf52dd5.seg 02691156
+03636649/points/526251a7530426a4b3c42e318f3affc.pts 03636649/expert_verified/points_label/526251a7530426a4b3c42e318f3affc.seg 03636649
+03001627/points/a1133464132d65fcfce0ccdae30f97db.pts 03001627/expert_verified/points_label/a1133464132d65fcfce0ccdae30f97db.seg 03001627
+02691156/points/d844094b073a0452b04b2d1c5ce9783b.pts 02691156/expert_verified/points_label/d844094b073a0452b04b2d1c5ce9783b.seg 02691156
+03948459/points/2f5b4bcb8d4dd901609e2d916fa0da27.pts 03948459/expert_verified/points_label/2f5b4bcb8d4dd901609e2d916fa0da27.seg 03948459
+03636649/points/a4c06cd5032733af543df75232f6ff2b.pts 03636649/expert_verified/points_label/a4c06cd5032733af543df75232f6ff2b.seg 03636649
+03636649/points/64eaa45bd2e01db8991ff09eca5b27a8.pts 03636649/expert_verified/points_label/64eaa45bd2e01db8991ff09eca5b27a8.seg 03636649
+03636649/points/5bc478e9c4e0bb8180936c51aa7ffcf5.pts 03636649/expert_verified/points_label/5bc478e9c4e0bb8180936c51aa7ffcf5.seg 03636649
+03636649/points/b02bd8e5ef9cfe354b3c42e318f3affc.pts 03636649/expert_verified/points_label/b02bd8e5ef9cfe354b3c42e318f3affc.seg 03636649
+03636649/points/cf6c082b9534049494db33559ec0df30.pts 03636649/expert_verified/points_label/cf6c082b9534049494db33559ec0df30.seg 03636649
+04225987/points/af4343c5b78b70b11082f2ea630bf69e.pts 04225987/expert_verified/points_label/af4343c5b78b70b11082f2ea630bf69e.seg 04225987
+03467517/points/c084022f2ddbf95493f0194265a9746c.pts 03467517/expert_verified/points_label/c084022f2ddbf95493f0194265a9746c.seg 03467517 +03001627/points/550dd11407c28f9f3bd04286517a8395.pts 03001627/expert_verified/points_label/550dd11407c28f9f3bd04286517a8395.seg 03001627 +04379243/points/702cebffa33a19f019f079d1b712f46f.pts 04379243/expert_verified/points_label/702cebffa33a19f019f079d1b712f46f.seg 04379243 +04379243/points/388d9e7b2b8a8f909492fbce0bd54e2e.pts 04379243/expert_verified/points_label/388d9e7b2b8a8f909492fbce0bd54e2e.seg 04379243 +03636649/points/7634fbdcaa6b304d62c83ac1e3a4ebaa.pts 03636649/expert_verified/points_label/7634fbdcaa6b304d62c83ac1e3a4ebaa.seg 03636649 +03636649/points/14d3d2418165ec86bba785994a529f86.pts 03636649/expert_verified/points_label/14d3d2418165ec86bba785994a529f86.seg 03636649 +04379243/points/13e19274b358ec867aa3000697a75d55.pts 04379243/expert_verified/points_label/13e19274b358ec867aa3000697a75d55.seg 04379243 +03467517/points/727fcc85add981325e683993f34d42f2.pts 03467517/expert_verified/points_label/727fcc85add981325e683993f34d42f2.seg 03467517 +02691156/points/947d6b9cd1966e2e719b5362fe06bbb.pts 02691156/expert_verified/points_label/947d6b9cd1966e2e719b5362fe06bbb.seg 02691156 +04379243/points/ee5f85db427865e63e5399147a5b4763.pts 04379243/expert_verified/points_label/ee5f85db427865e63e5399147a5b4763.seg 04379243 +02691156/points/1678946724380812de689e373096b0e3.pts 02691156/expert_verified/points_label/1678946724380812de689e373096b0e3.seg 02691156 +03001627/points/3fdef0a7606c397331ad067823a3f0ce.pts 03001627/expert_verified/points_label/3fdef0a7606c397331ad067823a3f0ce.seg 03001627 +03636649/points/1bb465b8f22315d1116f219d90a571c2.pts 03636649/expert_verified/points_label/1bb465b8f22315d1116f219d90a571c2.seg 03636649 +04379243/points/9dd5b7e6f90ee322b56d92c5d7b06038.pts 04379243/expert_verified/points_label/9dd5b7e6f90ee322b56d92c5d7b06038.seg 04379243 +03467517/points/7eee3b79e053759143891ae68a82472e.pts 03467517/expert_verified/points_label/7eee3b79e053759143891ae68a82472e.seg 03467517 +03001627/points/f4b6bf9253918b52944d8f8e13d63fde.pts 03001627/expert_verified/points_label/f4b6bf9253918b52944d8f8e13d63fde.seg 03001627 +03636649/points/92e0f64c08f0c8ac3c8d0fdfb1cc2535.pts 03636649/expert_verified/points_label/92e0f64c08f0c8ac3c8d0fdfb1cc2535.seg 03636649 +03624134/points/d63521a0dfac9c1f342494fa6f09f376.pts 03624134/expert_verified/points_label/d63521a0dfac9c1f342494fa6f09f376.seg 03624134 +04379243/points/c7ff0afab4b7885a52160ba64fb535b2.pts 04379243/expert_verified/points_label/c7ff0afab4b7885a52160ba64fb535b2.seg 04379243 +02958343/points/89765af115d9a4955591fcdffe729c55.pts 02958343/expert_verified/points_label/89765af115d9a4955591fcdffe729c55.seg 02958343 +03636649/points/70bf2aaedbf9499ec889c00efdaf9928.pts 03636649/expert_verified/points_label/70bf2aaedbf9499ec889c00efdaf9928.seg 03636649 +02958343/points/ef15b938dcfa9893c4d922e8a1141322.pts 02958343/expert_verified/points_label/ef15b938dcfa9893c4d922e8a1141322.seg 02958343 +03636649/points/4bb676c497969016de98d10ab5975b59.pts 03636649/expert_verified/points_label/4bb676c497969016de98d10ab5975b59.seg 03636649 +04379243/points/1c8121e1ad6cd6fc7a480f3f1d55ed3f.pts 04379243/expert_verified/points_label/1c8121e1ad6cd6fc7a480f3f1d55ed3f.seg 04379243 +04379243/points/83b8e64089968ae8fd3feb4581507302.pts 04379243/expert_verified/points_label/83b8e64089968ae8fd3feb4581507302.seg 04379243 +03636649/points/a4c0f3aed58f0e092fdae21c212bf119.pts 
03636649/expert_verified/points_label/a4c0f3aed58f0e092fdae21c212bf119.seg 03636649 +04379243/points/e02925509615eb5a4eaf5bbf36d243d4.pts 04379243/expert_verified/points_label/e02925509615eb5a4eaf5bbf36d243d4.seg 04379243 +04379243/points/c5087fce38b009ae30bbd4cddd04c77b.pts 04379243/expert_verified/points_label/c5087fce38b009ae30bbd4cddd04c77b.seg 04379243 +03001627/points/5107542cfbf142f36209799e55a657c.pts 03001627/expert_verified/points_label/5107542cfbf142f36209799e55a657c.seg 03001627 +04379243/points/94a62cfdb84e88ca9a3528690d225ee1.pts 04379243/expert_verified/points_label/94a62cfdb84e88ca9a3528690d225ee1.seg 04379243 +04379243/points/80ad1f839582d183fbf6f493308acc40.pts 04379243/expert_verified/points_label/80ad1f839582d183fbf6f493308acc40.seg 04379243 +03001627/points/91819d15c2c044ebd47ffa500636d198.pts 03001627/expert_verified/points_label/91819d15c2c044ebd47ffa500636d198.seg 03001627 +03636649/points/77a5a12147a6624d786810c22b062a88.pts 03636649/expert_verified/points_label/77a5a12147a6624d786810c22b062a88.seg 03636649 +03001627/points/beb4c42cfa1c3b282811d30bba54859.pts 03001627/expert_verified/points_label/beb4c42cfa1c3b282811d30bba54859.seg 03001627 +03636649/points/e529fc190753cc9df647dc544bb0ab61.pts 03636649/expert_verified/points_label/e529fc190753cc9df647dc544bb0ab61.seg 03636649 +04379243/points/680d4a8b5a30601a4b3c42e318f3affc.pts 04379243/expert_verified/points_label/680d4a8b5a30601a4b3c42e318f3affc.seg 04379243 +03001627/points/1d6f4020cab4ec1962d6a66a1a314d66.pts 03001627/expert_verified/points_label/1d6f4020cab4ec1962d6a66a1a314d66.seg 03001627 +03001627/points/5b3fd3199d1bc950c1ae25a29e9d46d3.pts 03001627/expert_verified/points_label/5b3fd3199d1bc950c1ae25a29e9d46d3.seg 03001627 +03001627/points/17e916fc863540ee3def89b32cef8e45.pts 03001627/expert_verified/points_label/17e916fc863540ee3def89b32cef8e45.seg 03001627 +04379243/points/a5d5fc6b0bb7881419fb4103277a6b93.pts 04379243/expert_verified/points_label/a5d5fc6b0bb7881419fb4103277a6b93.seg 04379243 +03001627/points/eafec1b145972dcd815b2b467e8e2eac.pts 03001627/expert_verified/points_label/eafec1b145972dcd815b2b467e8e2eac.seg 03001627 +04379243/points/1fb2be490f45ec6e19fb4103277a6b93.pts 04379243/expert_verified/points_label/1fb2be490f45ec6e19fb4103277a6b93.seg 04379243 +02691156/points/8b61ba80d9e487deca8607f540cc62ba.pts 02691156/expert_verified/points_label/8b61ba80d9e487deca8607f540cc62ba.seg 02691156 +03467517/points/2d767b3fbb8a3053b8836869016d1afd.pts 03467517/expert_verified/points_label/2d767b3fbb8a3053b8836869016d1afd.seg 03467517 +04379243/points/e0940f2229e42007d98e761e6d91dfc8.pts 04379243/expert_verified/points_label/e0940f2229e42007d98e761e6d91dfc8.seg 04379243 +03001627/points/bb90094030f369e4305a3b2fd9173d6f.pts 03001627/expert_verified/points_label/bb90094030f369e4305a3b2fd9173d6f.seg 03001627 +02958343/points/c6e3d9cf26016b5752aa494042b7c9db.pts 02958343/expert_verified/points_label/c6e3d9cf26016b5752aa494042b7c9db.seg 02958343 +03001627/points/bd0fab2e72b445bd1e722bceee6e83aa.pts 03001627/expert_verified/points_label/bd0fab2e72b445bd1e722bceee6e83aa.seg 03001627 +02691156/points/e86fd13a49f0ee0a62b600da24e0965.pts 02691156/expert_verified/points_label/e86fd13a49f0ee0a62b600da24e0965.seg 02691156 +03001627/points/eeebe3fe14ee4d3aebefe6b1d594ad2e.pts 03001627/expert_verified/points_label/eeebe3fe14ee4d3aebefe6b1d594ad2e.seg 03001627 +04379243/points/398dbb0a34ca527871a782a4379556c7.pts 04379243/expert_verified/points_label/398dbb0a34ca527871a782a4379556c7.seg 04379243 
+04379243/points/737cc2beda4a023619fb4103277a6b93.pts 04379243/expert_verified/points_label/737cc2beda4a023619fb4103277a6b93.seg 04379243 +03001627/points/3895b96949fd81c5f07fee5fc5c45ee2.pts 03001627/expert_verified/points_label/3895b96949fd81c5f07fee5fc5c45ee2.seg 03001627 +04379243/points/bba5ce8555c8fa89ba18ade30e563d37.pts 04379243/expert_verified/points_label/bba5ce8555c8fa89ba18ade30e563d37.seg 04379243 +04379243/points/cab027dd0162c5b7f1426260885dd0ef.pts 04379243/expert_verified/points_label/cab027dd0162c5b7f1426260885dd0ef.seg 04379243 +04379243/points/75f2bc98aecf198974984b9cd0997a52.pts 04379243/expert_verified/points_label/75f2bc98aecf198974984b9cd0997a52.seg 04379243 +04379243/points/8d4fe49d942ec85ff4b6538438a0b930.pts 04379243/expert_verified/points_label/8d4fe49d942ec85ff4b6538438a0b930.seg 04379243 +03001627/points/89dd53d0377c28207f7114254c4286d2.pts 03001627/expert_verified/points_label/89dd53d0377c28207f7114254c4286d2.seg 03001627 +03636649/points/a37695d83a39adb52866fbd701f50f71.pts 03636649/expert_verified/points_label/a37695d83a39adb52866fbd701f50f71.seg 03636649 +04379243/points/f99ebf0f053140525a0e5699b3040a35.pts 04379243/expert_verified/points_label/f99ebf0f053140525a0e5699b3040a35.seg 04379243 +03624134/points/bbfd2df3edce576e1e652fa812161367.pts 03624134/expert_verified/points_label/bbfd2df3edce576e1e652fa812161367.seg 03624134 +04379243/points/f0d8620b49ea76db83130614d8020b3.pts 04379243/expert_verified/points_label/f0d8620b49ea76db83130614d8020b3.seg 04379243 +04379243/points/d01a6b35a54c8f77dd986a55e273fa14.pts 04379243/expert_verified/points_label/d01a6b35a54c8f77dd986a55e273fa14.seg 04379243 +03001627/points/2f6b0ddf12d1311795bea7c29e873d16.pts 03001627/expert_verified/points_label/2f6b0ddf12d1311795bea7c29e873d16.seg 03001627 +03001627/points/5695fd37d1e673cebf964fc57f6a7d6d.pts 03001627/expert_verified/points_label/5695fd37d1e673cebf964fc57f6a7d6d.seg 03001627 +03636649/points/746b82746c6a02cca5f600ed2cf472ac.pts 03636649/expert_verified/points_label/746b82746c6a02cca5f600ed2cf472ac.seg 03636649 +03001627/points/bcc4ea0133864bfe4d4c0769270d8651.pts 03001627/expert_verified/points_label/bcc4ea0133864bfe4d4c0769270d8651.seg 03001627 +03624134/points/81ba3f06ec38eaa46016d22b1dfacd4b.pts 03624134/expert_verified/points_label/81ba3f06ec38eaa46016d22b1dfacd4b.seg 03624134 +04379243/points/2a2d6560f14a01c6afac72146bbc9d59.pts 04379243/expert_verified/points_label/2a2d6560f14a01c6afac72146bbc9d59.seg 04379243 +04379243/points/856e86709df98497dcfcef693e7ec696.pts 04379243/expert_verified/points_label/856e86709df98497dcfcef693e7ec696.seg 04379243 +03948459/points/7418810de4b13e8430b6ca3ac82edfa3.pts 03948459/expert_verified/points_label/7418810de4b13e8430b6ca3ac82edfa3.seg 03948459 +03001627/points/11e0f0dfd3d0b22130ddb6ead95f49cc.pts 03001627/expert_verified/points_label/11e0f0dfd3d0b22130ddb6ead95f49cc.seg 03001627 +04379243/points/5c6748b094725d9af008d8a3590fb522.pts 04379243/expert_verified/points_label/5c6748b094725d9af008d8a3590fb522.seg 04379243 +04379243/points/17f3a2945d6550cbf7628281ecb18112.pts 04379243/expert_verified/points_label/17f3a2945d6550cbf7628281ecb18112.seg 04379243 +04379243/points/889c9aedc4ba47592fb02b79d375eea5.pts 04379243/expert_verified/points_label/889c9aedc4ba47592fb02b79d375eea5.seg 04379243 +04379243/points/c0b74c61865b563067dc358060e3c47b.pts 04379243/expert_verified/points_label/c0b74c61865b563067dc358060e3c47b.seg 04379243 +03636649/points/783b81aa54a69a26d42b9650f19dd425.pts 
03636649/expert_verified/points_label/783b81aa54a69a26d42b9650f19dd425.seg 03636649 +03467517/points/8b8b084109eef6d81082f2ea630bf69e.pts 03467517/expert_verified/points_label/8b8b084109eef6d81082f2ea630bf69e.seg 03467517 +03001627/points/8a9af7d8a83d90fcd53e36731300f5b4.pts 03001627/expert_verified/points_label/8a9af7d8a83d90fcd53e36731300f5b4.seg 03001627 +03001627/points/47aca56ff3a7b8a71a782a4379556c7.pts 03001627/expert_verified/points_label/47aca56ff3a7b8a71a782a4379556c7.seg 03001627 +03001627/points/9fae8d94a028e9ec2818b21315fe1bde.pts 03001627/expert_verified/points_label/9fae8d94a028e9ec2818b21315fe1bde.seg 03001627 +03001627/points/9a41550ba7dd31e3bf80985a99195eb8.pts 03001627/expert_verified/points_label/9a41550ba7dd31e3bf80985a99195eb8.seg 03001627 +03001627/points/184b4797cea77beb5ca1c42bb8ac17a.pts 03001627/expert_verified/points_label/184b4797cea77beb5ca1c42bb8ac17a.seg 03001627 +04379243/points/bc1ff7fc750617d690f7bef12e52ac08.pts 04379243/expert_verified/points_label/bc1ff7fc750617d690f7bef12e52ac08.seg 04379243 +02691156/points/5fb64e3fc0abe449ca8607f540cc62ba.pts 02691156/expert_verified/points_label/5fb64e3fc0abe449ca8607f540cc62ba.seg 02691156 +03001627/points/2e0beb3b6927a2b7e45ef4135c266a12.pts 03001627/expert_verified/points_label/2e0beb3b6927a2b7e45ef4135c266a12.seg 03001627 +03467517/points/a38684b166ce2c77c155f88004a92bc8.pts 03467517/expert_verified/points_label/a38684b166ce2c77c155f88004a92bc8.seg 03467517 +02691156/points/b590adb6d3486f6e90b1d6deb98feec6.pts 02691156/expert_verified/points_label/b590adb6d3486f6e90b1d6deb98feec6.seg 02691156 +03636649/points/9d41e23f00d11d153033d35b49a20c8.pts 03636649/expert_verified/points_label/9d41e23f00d11d153033d35b49a20c8.seg 03636649 +03001627/points/f4b141ab64a6c4e771a782a4379556c7.pts 03001627/expert_verified/points_label/f4b141ab64a6c4e771a782a4379556c7.seg 03001627 +03948459/points/19e45672a3109f18be4927dbd39f74e9.pts 03948459/expert_verified/points_label/19e45672a3109f18be4927dbd39f74e9.seg 03948459 +04379243/points/58475b1b20ece0c5eeb8d422649e5f2b.pts 04379243/expert_verified/points_label/58475b1b20ece0c5eeb8d422649e5f2b.seg 04379243 +04379243/points/400393a56fc243c442c39a4fb8d01418.pts 04379243/expert_verified/points_label/400393a56fc243c442c39a4fb8d01418.seg 04379243 +03001627/points/a128eda00983dd01fb7d9615be5ab4b0.pts 03001627/expert_verified/points_label/a128eda00983dd01fb7d9615be5ab4b0.seg 03001627 +04379243/points/6af9a593129b028eb67e68783d58425a.pts 04379243/expert_verified/points_label/6af9a593129b028eb67e68783d58425a.seg 04379243 +03001627/points/40f188600cf8362b654ea6737b0d3597.pts 03001627/expert_verified/points_label/40f188600cf8362b654ea6737b0d3597.seg 03001627 +04379243/points/a4af8f822fa8d95456c08464b83f209e.pts 04379243/expert_verified/points_label/a4af8f822fa8d95456c08464b83f209e.seg 04379243 +03001627/points/d9558dccfe8e3381e45ef4135c266a12.pts 03001627/expert_verified/points_label/d9558dccfe8e3381e45ef4135c266a12.seg 03001627 +04379243/points/631028ddb76eed4dbb0085d0daabdaea.pts 04379243/expert_verified/points_label/631028ddb76eed4dbb0085d0daabdaea.seg 04379243 +03001627/points/8967e65c1541d1874aa7f42ef07f614e.pts 03001627/expert_verified/points_label/8967e65c1541d1874aa7f42ef07f614e.seg 03001627 +04379243/points/38feb6b209579f6faadbf8208284c675.pts 04379243/expert_verified/points_label/38feb6b209579f6faadbf8208284c675.seg 04379243 +03624134/points/60277f4060b8703e4e18d7136dc2dc80.pts 03624134/expert_verified/points_label/60277f4060b8703e4e18d7136dc2dc80.seg 03624134 
+03467517/points/a78c3356a5dca4e7670b811945485012.pts 03467517/expert_verified/points_label/a78c3356a5dca4e7670b811945485012.seg 03467517 +03797390/points/645b0e2ef3b95979204df312eabf367f.pts 03797390/expert_verified/points_label/645b0e2ef3b95979204df312eabf367f.seg 03797390 +03467517/points/bd6057c7ac1ef31193f0194265a9746c.pts 03467517/expert_verified/points_label/bd6057c7ac1ef31193f0194265a9746c.seg 03467517 +03790512/points/bcbcfdad5e0e1d9ba88e8cb97b773125.pts 03790512/expert_verified/points_label/bcbcfdad5e0e1d9ba88e8cb97b773125.seg 03790512 +03636649/points/761fb0822bb05bc8ee0cd746086d989.pts 03636649/expert_verified/points_label/761fb0822bb05bc8ee0cd746086d989.seg 03636649 +03636649/points/be13324c84d2a9d72b151d8b52c53b90.pts 03636649/expert_verified/points_label/be13324c84d2a9d72b151d8b52c53b90.seg 03636649 +04379243/points/7b3dfbd70333485d219a1300d9489f4e.pts 04379243/expert_verified/points_label/7b3dfbd70333485d219a1300d9489f4e.seg 04379243 +04379243/points/22c5cbe6271736bffebad4f49b26ec52.pts 04379243/expert_verified/points_label/22c5cbe6271736bffebad4f49b26ec52.seg 04379243 +02958343/points/4b7b3b54dc04df53c19f1e8ed99ac2fa.pts 02958343/expert_verified/points_label/4b7b3b54dc04df53c19f1e8ed99ac2fa.seg 02958343 +03636649/points/947c6753d77d8082290e2f84c414e6be.pts 03636649/expert_verified/points_label/947c6753d77d8082290e2f84c414e6be.seg 03636649 +02958343/points/36c2770d00fdd0bdf1ee968c9039cc3.pts 02958343/expert_verified/points_label/36c2770d00fdd0bdf1ee968c9039cc3.seg 02958343 +03001627/points/4ac17ecd78880859e302b6082b0ffc09.pts 03001627/expert_verified/points_label/4ac17ecd78880859e302b6082b0ffc09.seg 03001627 +03636649/points/70b78b9439a9de7530f6e0ede20c4525.pts 03636649/expert_verified/points_label/70b78b9439a9de7530f6e0ede20c4525.seg 03636649 +04379243/points/d8be4b45afb21cf1616fb9ab42452112.pts 04379243/expert_verified/points_label/d8be4b45afb21cf1616fb9ab42452112.seg 04379243 +02691156/points/fe266c740580c102ff9ce0c50c2cd25a.pts 02691156/expert_verified/points_label/fe266c740580c102ff9ce0c50c2cd25a.seg 02691156 +02958343/points/30f4617775480bcce27281f3b76d1f5.pts 02958343/expert_verified/points_label/30f4617775480bcce27281f3b76d1f5.seg 02958343 +03467517/points/34874708b51c7ed493f0194265a9746c.pts 03467517/expert_verified/points_label/34874708b51c7ed493f0194265a9746c.seg 03467517 +04225987/points/abdc4a823b1f78c397f47f3057557cbe.pts 04225987/expert_verified/points_label/abdc4a823b1f78c397f47f3057557cbe.seg 04225987 +03948459/points/14fe99eb0c105a90fc9c56fb43681c11.pts 03948459/expert_verified/points_label/14fe99eb0c105a90fc9c56fb43681c11.seg 03948459 +04379243/points/f5aecb6607876495e03eb69820d1aaf2.pts 04379243/expert_verified/points_label/f5aecb6607876495e03eb69820d1aaf2.seg 04379243 +03001627/points/3c81fab5678a3872327289c00b6dc9ca.pts 03001627/expert_verified/points_label/3c81fab5678a3872327289c00b6dc9ca.seg 03001627 +04379243/points/fe3351c94fbab8ce3002761e7a3ba3bd.pts 04379243/expert_verified/points_label/fe3351c94fbab8ce3002761e7a3ba3bd.seg 04379243 +04379243/points/5f0c33039269b7a9f0e84b9d9ad447e2.pts 04379243/expert_verified/points_label/5f0c33039269b7a9f0e84b9d9ad447e2.seg 04379243 +03001627/points/fa7347547e290732bf65e1af50b5b7d4.pts 03001627/expert_verified/points_label/fa7347547e290732bf65e1af50b5b7d4.seg 03001627 +04379243/points/9c33336af33fd905776d8bc79b9caa2c.pts 04379243/expert_verified/points_label/9c33336af33fd905776d8bc79b9caa2c.seg 04379243 +03001627/points/1d828c69106609f8cd783766d090e665.pts 
03001627/expert_verified/points_label/1d828c69106609f8cd783766d090e665.seg 03001627 +04379243/points/5fbb7a5f01f646ca5830980abc1c717a.pts 04379243/expert_verified/points_label/5fbb7a5f01f646ca5830980abc1c717a.seg 04379243 +03636649/points/777a686890d74b350359b4e03cfdfa.pts 03636649/expert_verified/points_label/777a686890d74b350359b4e03cfdfa.seg 03636649 +02773838/points/3077a9b76724b6d35de21284bb515a83.pts 02773838/expert_verified/points_label/3077a9b76724b6d35de21284bb515a83.seg 02773838 +03642806/points/b233163860361eda8cfacef5204026d6.pts 03642806/expert_verified/points_label/b233163860361eda8cfacef5204026d6.seg 03642806 +02958343/points/f10f279643fbb3276a78cd0552215cff.pts 02958343/expert_verified/points_label/f10f279643fbb3276a78cd0552215cff.seg 02958343 +02691156/points/2c64c521c114df40e51f766854841067.pts 02691156/expert_verified/points_label/2c64c521c114df40e51f766854841067.seg 02691156 +03001627/points/3b8f2b955ee9a904b3c42e318f3affc.pts 03001627/expert_verified/points_label/3b8f2b955ee9a904b3c42e318f3affc.seg 03001627 +04379243/points/2a64bd38a4e42f33dc43fde5155b3946.pts 04379243/expert_verified/points_label/2a64bd38a4e42f33dc43fde5155b3946.seg 04379243 +03001627/points/52310bca00e6a3671201d487ecde379e.pts 03001627/expert_verified/points_label/52310bca00e6a3671201d487ecde379e.seg 03001627 +03001627/points/5346017af72c1843169d299c5f567c18.pts 03001627/expert_verified/points_label/5346017af72c1843169d299c5f567c18.seg 03001627 +02954340/points/c1436c38beba0005284432ce2f42f498.pts 02954340/expert_verified/points_label/c1436c38beba0005284432ce2f42f498.seg 02954340 +03636649/points/34ce1de178694f87e76bc197b3a3ffc0.pts 03636649/expert_verified/points_label/34ce1de178694f87e76bc197b3a3ffc0.seg 03636649 +03001627/points/8e7714615a4b1e6f82390c5f604e0d9b.pts 03001627/expert_verified/points_label/8e7714615a4b1e6f82390c5f604e0d9b.seg 03001627 +03948459/points/a3e6dcfc074489fd8ec2966c0323533e.pts 03948459/expert_verified/points_label/a3e6dcfc074489fd8ec2966c0323533e.seg 03948459 +02691156/points/3ad337dcef167024fe6302fece358e4a.pts 02691156/expert_verified/points_label/3ad337dcef167024fe6302fece358e4a.seg 02691156 +04379243/points/124cc3b92266c2767156f312cf4e035e.pts 04379243/expert_verified/points_label/124cc3b92266c2767156f312cf4e035e.seg 04379243 +04379243/points/ee5f0411fcff59951105a3fc18779f13.pts 04379243/expert_verified/points_label/ee5f0411fcff59951105a3fc18779f13.seg 04379243 +04379243/points/b1117a83ebf5a4c9c337a931444a5063.pts 04379243/expert_verified/points_label/b1117a83ebf5a4c9c337a931444a5063.seg 04379243 +03001627/points/fb847cd696ec711197f2016c3d6097c9.pts 03001627/expert_verified/points_label/fb847cd696ec711197f2016c3d6097c9.seg 03001627 +02691156/points/50da48c8e7644508fca1f1143bb6bc17.pts 02691156/expert_verified/points_label/50da48c8e7644508fca1f1143bb6bc17.seg 02691156 +02958343/points/78c0bec338fa1c01d6b98bf27ff43caf.pts 02958343/expert_verified/points_label/78c0bec338fa1c01d6b98bf27ff43caf.seg 02958343 +02691156/points/37fbd275a734ec1b66cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/37fbd275a734ec1b66cf1b4a8fc3914e.seg 02691156 +03636649/points/e053e531fc4341b5fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/e053e531fc4341b5fcb8d8c6d4df8143.seg 03636649 +02691156/points/3db61220251b3c9de719b5362fe06bbb.pts 02691156/expert_verified/points_label/3db61220251b3c9de719b5362fe06bbb.seg 02691156 +03642806/points/a7f983f1d0642745135a402b573354e4.pts 03642806/expert_verified/points_label/a7f983f1d0642745135a402b573354e4.seg 03642806 
+03001627/points/4e26eab28703c12bdd5f3f2440a93d21.pts 03001627/expert_verified/points_label/4e26eab28703c12bdd5f3f2440a93d21.seg 03001627 +04225987/points/24e46e195f4907887a70e5e6aa241c88.pts 04225987/expert_verified/points_label/24e46e195f4907887a70e5e6aa241c88.seg 04225987 +02691156/points/3ab1e94b6c3a1730c56cc5a87f567365.pts 02691156/expert_verified/points_label/3ab1e94b6c3a1730c56cc5a87f567365.seg 02691156 +03001627/points/61b984febe54b752d61420a53a0cb96d.pts 03001627/expert_verified/points_label/61b984febe54b752d61420a53a0cb96d.seg 03001627 +04379243/points/adf574f947f00bdd548b2639ebc3e759.pts 04379243/expert_verified/points_label/adf574f947f00bdd548b2639ebc3e759.seg 04379243 +03001627/points/ef76b9cbf76bad40586ef70b3cee4240.pts 03001627/expert_verified/points_label/ef76b9cbf76bad40586ef70b3cee4240.seg 03001627 +04379243/points/abef0c609ad3e9c2edea4b985280bcc1.pts 04379243/expert_verified/points_label/abef0c609ad3e9c2edea4b985280bcc1.seg 04379243 +02773838/points/1b84dededd445058e44a5473032f38f.pts 02773838/expert_verified/points_label/1b84dededd445058e44a5473032f38f.seg 02773838 +04379243/points/cd09a9641ea97d873823cce3247aa03b.pts 04379243/expert_verified/points_label/cd09a9641ea97d873823cce3247aa03b.seg 04379243 +03636649/points/6aa1ce4e245001589f1a71e46bbde97c.pts 03636649/expert_verified/points_label/6aa1ce4e245001589f1a71e46bbde97c.seg 03636649 +04379243/points/bb1aa2cdf216d348e76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/bb1aa2cdf216d348e76bc197b3a3ffc0.seg 04379243 +04379243/points/da1e75a8647bfd919778416969ddad32.pts 04379243/expert_verified/points_label/da1e75a8647bfd919778416969ddad32.seg 04379243 +02958343/points/3d0308da43d52e3ef56f8ea3d9016e55.pts 02958343/expert_verified/points_label/3d0308da43d52e3ef56f8ea3d9016e55.seg 02958343 +04379243/points/1ca75076bcebfac76c3484ac7eef025f.pts 04379243/expert_verified/points_label/1ca75076bcebfac76c3484ac7eef025f.seg 04379243 +02691156/points/97ec5b82d9757b639cb1b92881e8e76.pts 02691156/expert_verified/points_label/97ec5b82d9757b639cb1b92881e8e76.seg 02691156 +02691156/points/75db11c354c6342aad01ec966c80ac91.pts 02691156/expert_verified/points_label/75db11c354c6342aad01ec966c80ac91.seg 02691156 +02691156/points/caf80ecbad22a7384e1799d9d4d697c3.pts 02691156/expert_verified/points_label/caf80ecbad22a7384e1799d9d4d697c3.seg 02691156 +03001627/points/d6e0a95f00c7af6fbae0ffb97058b7cc.pts 03001627/expert_verified/points_label/d6e0a95f00c7af6fbae0ffb97058b7cc.seg 03001627 +04379243/points/fa72e9cf7308066b1c072ac0b83fe07a.pts 04379243/expert_verified/points_label/fa72e9cf7308066b1c072ac0b83fe07a.seg 04379243 +03790512/points/455485399ab75f93429f1c522640e6f0.pts 03790512/expert_verified/points_label/455485399ab75f93429f1c522640e6f0.seg 03790512 +03642806/points/241ec8a746dd1cfc78f71a335ebabfa5.pts 03642806/expert_verified/points_label/241ec8a746dd1cfc78f71a335ebabfa5.seg 03642806 +04379243/points/c6575b4c39a341c698d5fc0473d00a1c.pts 04379243/expert_verified/points_label/c6575b4c39a341c698d5fc0473d00a1c.seg 04379243 +02958343/points/219a0021526791d18bb5c0bf5eec83fc.pts 02958343/expert_verified/points_label/219a0021526791d18bb5c0bf5eec83fc.seg 02958343 +02691156/points/49917fb82beca4beca8607f540cc62ba.pts 02691156/expert_verified/points_label/49917fb82beca4beca8607f540cc62ba.seg 02691156 +03636649/points/dac278ab197b5efefaa6996ece0d86f4.pts 03636649/expert_verified/points_label/dac278ab197b5efefaa6996ece0d86f4.seg 03636649 +03467517/points/f146c58eaa06f5e4d57700c05b1862d8.pts 
03467517/expert_verified/points_label/f146c58eaa06f5e4d57700c05b1862d8.seg 03467517 +04379243/points/aaf6be1d92a8c61fdcfcef693e7ec696.pts 04379243/expert_verified/points_label/aaf6be1d92a8c61fdcfcef693e7ec696.seg 04379243 +03001627/points/46789c1fb150dfaf51f77a6d7299806.pts 03001627/expert_verified/points_label/46789c1fb150dfaf51f77a6d7299806.seg 03001627 +03790512/points/4a2f0b20ef680347395d58407f193ba.pts 03790512/expert_verified/points_label/4a2f0b20ef680347395d58407f193ba.seg 03790512 +04379243/points/28ce06aa6f25b39f2d19175e7d19b7cb.pts 04379243/expert_verified/points_label/28ce06aa6f25b39f2d19175e7d19b7cb.seg 04379243 +02958343/points/1710ff46ca275e171df27141dea8c9a.pts 02958343/expert_verified/points_label/1710ff46ca275e171df27141dea8c9a.seg 02958343 +03636649/points/b57bcdb88c669663ec2a7a1f5fe7365d.pts 03636649/expert_verified/points_label/b57bcdb88c669663ec2a7a1f5fe7365d.seg 03636649 +04379243/points/c348d279fd22730a9741b7ee128375de.pts 04379243/expert_verified/points_label/c348d279fd22730a9741b7ee128375de.seg 04379243 +03001627/points/76fe7cf10c5dbf1edcb466b6f48b5810.pts 03001627/expert_verified/points_label/76fe7cf10c5dbf1edcb466b6f48b5810.seg 03001627 +04379243/points/7727cc0cb47705632dfc2f8d5d30193c.pts 04379243/expert_verified/points_label/7727cc0cb47705632dfc2f8d5d30193c.seg 04379243 +03797390/points/586e67c53f181dc22adf8abaa25e0215.pts 03797390/expert_verified/points_label/586e67c53f181dc22adf8abaa25e0215.seg 03797390 +04379243/points/d9b418e6ec14dbf50efffb055ed6bd1.pts 04379243/expert_verified/points_label/d9b418e6ec14dbf50efffb055ed6bd1.seg 04379243 +04379243/points/f52e52094d8240b2dcfcef693e7ec696.pts 04379243/expert_verified/points_label/f52e52094d8240b2dcfcef693e7ec696.seg 04379243 +02691156/points/821309c2037b49135fab3f99161dc2c2.pts 02691156/expert_verified/points_label/821309c2037b49135fab3f99161dc2c2.seg 02691156 +02954340/points/254e230d31a62470a52821bf1aa3b19a.pts 02954340/expert_verified/points_label/254e230d31a62470a52821bf1aa3b19a.seg 02954340 +02691156/points/e8de6c58f4a772d771d03b466c72ce41.pts 02691156/expert_verified/points_label/e8de6c58f4a772d771d03b466c72ce41.seg 02691156 +03642806/points/f1c6801e84c85a07bfb149497503af.pts 03642806/expert_verified/points_label/f1c6801e84c85a07bfb149497503af.seg 03642806 +02691156/points/a04d10b24ede5e9a3de778e85611513b.pts 02691156/expert_verified/points_label/a04d10b24ede5e9a3de778e85611513b.seg 02691156 +03467517/points/c8acdfaec5008118343b0b12983b9982.pts 03467517/expert_verified/points_label/c8acdfaec5008118343b0b12983b9982.seg 03467517 +03001627/points/9c3e53d9d1e653c0bf80985a99195eb8.pts 03001627/expert_verified/points_label/9c3e53d9d1e653c0bf80985a99195eb8.seg 03001627 +02691156/points/123bd9e948881939c38a1d3458dafa1b.pts 02691156/expert_verified/points_label/123bd9e948881939c38a1d3458dafa1b.seg 02691156 +03948459/points/abc7a1373f4b30291adcc40d88daf7c8.pts 03948459/expert_verified/points_label/abc7a1373f4b30291adcc40d88daf7c8.seg 03948459 +03636649/points/c906a9c7ae536a0c7fb7f79251dd7727.pts 03636649/expert_verified/points_label/c906a9c7ae536a0c7fb7f79251dd7727.seg 03636649 +03797390/points/e71102b6da1d63f3a363b55cbd344baa.pts 03797390/expert_verified/points_label/e71102b6da1d63f3a363b55cbd344baa.seg 03797390 +03642806/points/22389f9c3c049ce757c29983a611b1c6.pts 03642806/expert_verified/points_label/22389f9c3c049ce757c29983a611b1c6.seg 03642806 +04379243/points/5c2c29fd07c365afe5c65540d3456093.pts 04379243/expert_verified/points_label/5c2c29fd07c365afe5c65540d3456093.seg 04379243 
+03001627/points/9a8dfc7a6831749f504721639e19f609.pts 03001627/expert_verified/points_label/9a8dfc7a6831749f504721639e19f609.seg 03001627 +03001627/points/d49ce87d43cf4c8f1679065e1c457f94.pts 03001627/expert_verified/points_label/d49ce87d43cf4c8f1679065e1c457f94.seg 03001627 +02691156/points/dfa36bffe436a98ee0534173b9189765.pts 02691156/expert_verified/points_label/dfa36bffe436a98ee0534173b9189765.seg 02691156 +04379243/points/987b7b49a1435a4b1b17743c18fb63dc.pts 04379243/expert_verified/points_label/987b7b49a1435a4b1b17743c18fb63dc.seg 04379243 +04379243/points/8d0d7787f4babee7e66285d36ebb986.pts 04379243/expert_verified/points_label/8d0d7787f4babee7e66285d36ebb986.seg 04379243 +04379243/points/4f06092100d0164013d2510999d0f1d2.pts 04379243/expert_verified/points_label/4f06092100d0164013d2510999d0f1d2.seg 04379243 +02958343/points/fce2b933f93d132f4f45033b2f001552.pts 02958343/expert_verified/points_label/fce2b933f93d132f4f45033b2f001552.seg 02958343 +04379243/points/3817a222e96acc4ca78510b72d2281ea.pts 04379243/expert_verified/points_label/3817a222e96acc4ca78510b72d2281ea.seg 04379243 +03001627/points/7ee09fdece7d9142afdb9a672b7d3b8a.pts 03001627/expert_verified/points_label/7ee09fdece7d9142afdb9a672b7d3b8a.seg 03001627 +04379243/points/676d05aaaeecb8a04b3c42e318f3affc.pts 04379243/expert_verified/points_label/676d05aaaeecb8a04b3c42e318f3affc.seg 04379243 +03624134/points/6813197ad5e7011fcc34b900bb2492e.pts 03624134/expert_verified/points_label/6813197ad5e7011fcc34b900bb2492e.seg 03624134 +04379243/points/ea367e390741fc38dcfcef693e7ec696.pts 04379243/expert_verified/points_label/ea367e390741fc38dcfcef693e7ec696.seg 04379243 +04379243/points/2e5ac0552fa296c43bbab77a66bc3671.pts 04379243/expert_verified/points_label/2e5ac0552fa296c43bbab77a66bc3671.seg 04379243 +03467517/points/32a337387527f39193f0194265a9746c.pts 03467517/expert_verified/points_label/32a337387527f39193f0194265a9746c.seg 03467517 +03001627/points/97cd4ed02e022ce7174150bd56e389a8.pts 03001627/expert_verified/points_label/97cd4ed02e022ce7174150bd56e389a8.seg 03001627 +04379243/points/88e06a85e2a0f99fa7e7cb173e141227.pts 04379243/expert_verified/points_label/88e06a85e2a0f99fa7e7cb173e141227.seg 04379243 +04379243/points/c5a02d586ea431a1e76bc197b3a3ffc0.pts 04379243/expert_verified/points_label/c5a02d586ea431a1e76bc197b3a3ffc0.seg 04379243 +03001627/points/bcdcb4928e07e4174a623eb2e3317415.pts 03001627/expert_verified/points_label/bcdcb4928e07e4174a623eb2e3317415.seg 03001627 +02691156/points/934dd5529c22cd05bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/934dd5529c22cd05bc0909d98a1ff2b4.seg 02691156 +03001627/points/e696f4c7cd88b8b52ff834514c92e8fd.pts 03001627/expert_verified/points_label/e696f4c7cd88b8b52ff834514c92e8fd.seg 03001627 +02691156/points/93ba822e84586999e3375a6b96a1d765.pts 02691156/expert_verified/points_label/93ba822e84586999e3375a6b96a1d765.seg 02691156 +02958343/points/3ac664a7486a0bdff200a72c9245aee7.pts 02958343/expert_verified/points_label/3ac664a7486a0bdff200a72c9245aee7.seg 02958343 +02691156/points/545cadae487b55bbc46ba5100bcdc520.pts 02691156/expert_verified/points_label/545cadae487b55bbc46ba5100bcdc520.seg 02691156 +03001627/points/c47f71319ead4eb8a4fb72f4f3b0e317.pts 03001627/expert_verified/points_label/c47f71319ead4eb8a4fb72f4f3b0e317.seg 03001627 +04379243/points/39bb09201e0cd201c17e7f250c5222bd.pts 04379243/expert_verified/points_label/39bb09201e0cd201c17e7f250c5222bd.seg 04379243 +04379243/points/13782b95eeefcedacf004563556ddb36.pts 
04379243/expert_verified/points_label/13782b95eeefcedacf004563556ddb36.seg 04379243 +03001627/points/3cc90d903e0ec7aa61e11d707ecb7fa0.pts 03001627/expert_verified/points_label/3cc90d903e0ec7aa61e11d707ecb7fa0.seg 03001627 +04379243/points/4079aaabaa6451a2765ca89770f206ec.pts 04379243/expert_verified/points_label/4079aaabaa6451a2765ca89770f206ec.seg 04379243 +04379243/points/4bbf789edb243cafc955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/4bbf789edb243cafc955e5ed03ef3a2f.seg 04379243 +02773838/points/6187bd900c3bc002ed13f430b2941481.pts 02773838/expert_verified/points_label/6187bd900c3bc002ed13f430b2941481.seg 02773838 +04379243/points/6dc6bb97c387b2f3af4e8812cf1b9e1.pts 04379243/expert_verified/points_label/6dc6bb97c387b2f3af4e8812cf1b9e1.seg 04379243 +03467517/points/9c260623916034b6f7d037d5768b173f.pts 03467517/expert_verified/points_label/9c260623916034b6f7d037d5768b173f.seg 03467517 +02691156/points/8d5c3d38de9c3685f2e77d54f4da142.pts 02691156/expert_verified/points_label/8d5c3d38de9c3685f2e77d54f4da142.seg 02691156 +04379243/points/6152e14b042aa17546f41dc2aaef556b.pts 04379243/expert_verified/points_label/6152e14b042aa17546f41dc2aaef556b.seg 04379243 +03467517/points/68a8bf89972cd337a77e8142614cdaae.pts 03467517/expert_verified/points_label/68a8bf89972cd337a77e8142614cdaae.seg 03467517 +02691156/points/3d5354863690ac7eca27bba175814d1.pts 02691156/expert_verified/points_label/3d5354863690ac7eca27bba175814d1.seg 02691156 +04379243/points/3411daa955306811d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/3411daa955306811d93768e7b9b1eabf.seg 04379243 +04379243/points/8594658920d6ea7b23656ce81843.pts 04379243/expert_verified/points_label/8594658920d6ea7b23656ce81843.seg 04379243 +02691156/points/a074750e28ed3818203936772104a82d.pts 02691156/expert_verified/points_label/a074750e28ed3818203936772104a82d.seg 02691156 +04379243/points/fcd4d0e1777f4841dcfcef693e7ec696.pts 04379243/expert_verified/points_label/fcd4d0e1777f4841dcfcef693e7ec696.seg 04379243 +03948459/points/708e38e7b733fd22bfae4699de9cb91a.pts 03948459/expert_verified/points_label/708e38e7b733fd22bfae4699de9cb91a.seg 03948459 +04379243/points/3c4e1361b066ea3b8ca998f0f87d0c84.pts 04379243/expert_verified/points_label/3c4e1361b066ea3b8ca998f0f87d0c84.seg 04379243 +03624134/points/38798b7013607bbf1e0b76f10c6e38af.pts 03624134/expert_verified/points_label/38798b7013607bbf1e0b76f10c6e38af.seg 03624134 +02691156/points/2176fa9f69e5e1dcca8607f540cc62ba.pts 02691156/expert_verified/points_label/2176fa9f69e5e1dcca8607f540cc62ba.seg 02691156 +03467517/points/8dd7df733a5ba17acae98171fea031ef.pts 03467517/expert_verified/points_label/8dd7df733a5ba17acae98171fea031ef.seg 03467517 +03001627/points/d3f31fd0fc99f45e8b3f6b4a44a70e52.pts 03001627/expert_verified/points_label/d3f31fd0fc99f45e8b3f6b4a44a70e52.seg 03001627 +02691156/points/118e8142a8cb1fe19a4a28ef635593ce.pts 02691156/expert_verified/points_label/118e8142a8cb1fe19a4a28ef635593ce.seg 02691156 +03624134/points/de62211649b4cced49384f9741ad64d8.pts 03624134/expert_verified/points_label/de62211649b4cced49384f9741ad64d8.seg 03624134 +03642806/points/7a4342f61ed7b153341aafe10fd0cbd4.pts 03642806/expert_verified/points_label/7a4342f61ed7b153341aafe10fd0cbd4.seg 03642806 +03001627/points/ba56f02dee485974c242632b2a8c3129.pts 03001627/expert_verified/points_label/ba56f02dee485974c242632b2a8c3129.seg 03001627 +04379243/points/97b7baeb8a172de42f56f09e5bc67bee.pts 04379243/expert_verified/points_label/97b7baeb8a172de42f56f09e5bc67bee.seg 04379243 
+04379243/points/7b2af227264af938d42b9650f19dd425.pts 04379243/expert_verified/points_label/7b2af227264af938d42b9650f19dd425.seg 04379243 +04379243/points/e25fdb977fb867fdc3bd24f986301745.pts 04379243/expert_verified/points_label/e25fdb977fb867fdc3bd24f986301745.seg 04379243 +03467517/points/33da9c54f43be3e17693a84bff425e3.pts 03467517/expert_verified/points_label/33da9c54f43be3e17693a84bff425e3.seg 03467517 +02691156/points/e1e5cfcabcbe26a03087f84b199fd297.pts 02691156/expert_verified/points_label/e1e5cfcabcbe26a03087f84b199fd297.seg 02691156 +03636649/points/ba05811f301cdd791735ea0e092a805a.pts 03636649/expert_verified/points_label/ba05811f301cdd791735ea0e092a805a.seg 03636649 +03001627/points/6678f63c9b584a549d9e5580ae9f8738.pts 03001627/expert_verified/points_label/6678f63c9b584a549d9e5580ae9f8738.seg 03001627 +04379243/points/b6b8ede77085c0a95bea7c29e873d16.pts 04379243/expert_verified/points_label/b6b8ede77085c0a95bea7c29e873d16.seg 04379243 +02691156/points/d81042a53dd1cc5bd90bfc986bc4c94d.pts 02691156/expert_verified/points_label/d81042a53dd1cc5bd90bfc986bc4c94d.seg 02691156 +03001627/points/37b432326fecc8a1327289c00b6dc9ca.pts 03001627/expert_verified/points_label/37b432326fecc8a1327289c00b6dc9ca.seg 03001627 +03636649/points/c898f9b1dddbb8801735ea0e092a805a.pts 03636649/expert_verified/points_label/c898f9b1dddbb8801735ea0e092a805a.seg 03636649 +03001627/points/5d02aed0e9c93e829b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/5d02aed0e9c93e829b9f2eb77f5e247e.seg 03001627 +03001627/points/9a864d5de972a8c7cb686b8b855fed61.pts 03001627/expert_verified/points_label/9a864d5de972a8c7cb686b8b855fed61.seg 03001627 +04379243/points/b14a14cc2f3c38c9e3def9c422df2282.pts 04379243/expert_verified/points_label/b14a14cc2f3c38c9e3def9c422df2282.seg 04379243 +04379243/points/f2893a87ec37f8b3781cb4570305e329.pts 04379243/expert_verified/points_label/f2893a87ec37f8b3781cb4570305e329.seg 04379243 +02691156/points/3fa511e1882e41eeca8607f540cc62ba.pts 02691156/expert_verified/points_label/3fa511e1882e41eeca8607f540cc62ba.seg 02691156 +02691156/points/444d67950ff9a4cc1139bebb00fe5be8.pts 02691156/expert_verified/points_label/444d67950ff9a4cc1139bebb00fe5be8.seg 02691156 +03001627/points/3d3b7f63f5525b1ae37f5a622d383617.pts 03001627/expert_verified/points_label/3d3b7f63f5525b1ae37f5a622d383617.seg 03001627 +03001627/points/30beaf15d2d2beb1febad4f49b26ec52.pts 03001627/expert_verified/points_label/30beaf15d2d2beb1febad4f49b26ec52.seg 03001627 +04379243/points/59f04ddbd896f4f5430644dfe647c381.pts 04379243/expert_verified/points_label/59f04ddbd896f4f5430644dfe647c381.seg 04379243 +04379243/points/eb9b9b8d186a974a7afee304cce81d6f.pts 04379243/expert_verified/points_label/eb9b9b8d186a974a7afee304cce81d6f.seg 04379243 +03790512/points/7c4fc3a05d5fc8b1d0f568c31c1cd62a.pts 03790512/expert_verified/points_label/7c4fc3a05d5fc8b1d0f568c31c1cd62a.seg 03790512 +04379243/points/68142013a4f5e7c2febad4f49b26ec52.pts 04379243/expert_verified/points_label/68142013a4f5e7c2febad4f49b26ec52.seg 04379243 +02958343/points/8053e014516531ddc3f500d7b182f6.pts 02958343/expert_verified/points_label/8053e014516531ddc3f500d7b182f6.seg 02958343 +02958343/points/1a3782ae4bd711b66b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/1a3782ae4bd711b66b418c7d9fedcaa9.seg 02958343 +04379243/points/cc58de930acd321fac242c3aebc81b2f.pts 04379243/expert_verified/points_label/cc58de930acd321fac242c3aebc81b2f.seg 04379243 +02691156/points/d4dac019726e980e203936772104a82d.pts 
02691156/expert_verified/points_label/d4dac019726e980e203936772104a82d.seg 02691156 +02954340/points/6e983d20e0bf80296829cd4082fbdbdf.pts 02954340/expert_verified/points_label/6e983d20e0bf80296829cd4082fbdbdf.seg 02954340 +03636649/points/fad026744a6abb1937cf479d4bb58d.pts 03636649/expert_verified/points_label/fad026744a6abb1937cf479d4bb58d.seg 03636649 +02958343/points/4d2d4e26349be1f3be2cbcda9b6dc9b2.pts 02958343/expert_verified/points_label/4d2d4e26349be1f3be2cbcda9b6dc9b2.seg 02958343 +03636649/points/280fa01686e780ba3501c961e91ff6d7.pts 03636649/expert_verified/points_label/280fa01686e780ba3501c961e91ff6d7.seg 03636649 +04379243/points/f02907c5c42e1e766f1e07a56c129dfc.pts 04379243/expert_verified/points_label/f02907c5c42e1e766f1e07a56c129dfc.seg 04379243 +04379243/points/5f100571ffd90f8252b4875f731f71cd.pts 04379243/expert_verified/points_label/5f100571ffd90f8252b4875f731f71cd.seg 04379243 +04379243/points/f718cb5d6202341dc183308b9aafe2ca.pts 04379243/expert_verified/points_label/f718cb5d6202341dc183308b9aafe2ca.seg 04379243 +03642806/points/b436271050d647052f8d6d501b18a4b5.pts 03642806/expert_verified/points_label/b436271050d647052f8d6d501b18a4b5.seg 03642806 +03001627/points/6dddf2b95ca09bf5febad4f49b26ec52.pts 03001627/expert_verified/points_label/6dddf2b95ca09bf5febad4f49b26ec52.seg 03001627 +02691156/points/b812c2df636aa0218b96ae1a0a8b84ec.pts 02691156/expert_verified/points_label/b812c2df636aa0218b96ae1a0a8b84ec.seg 02691156 +02958343/points/89edb3d434f4c983afe1d4530f4c6e24.pts 02958343/expert_verified/points_label/89edb3d434f4c983afe1d4530f4c6e24.seg 02958343 +02958343/points/80ac9cc0d4c9dde3b7a7bc444c2d756b.pts 02958343/expert_verified/points_label/80ac9cc0d4c9dde3b7a7bc444c2d756b.seg 02958343 +04379243/points/b62d45745434ac46c4cfe384be4426c3.pts 04379243/expert_verified/points_label/b62d45745434ac46c4cfe384be4426c3.seg 04379243 +04379243/points/9c4afb731e910d3723500a5b036df62e.pts 04379243/expert_verified/points_label/9c4afb731e910d3723500a5b036df62e.seg 04379243 +04379243/points/43fcddd5232a6021a56e8b79ca4e2911.pts 04379243/expert_verified/points_label/43fcddd5232a6021a56e8b79ca4e2911.seg 04379243 +04379243/points/6724ae69c0bde4c09b7dad6c9c46bcf1.pts 04379243/expert_verified/points_label/6724ae69c0bde4c09b7dad6c9c46bcf1.seg 04379243 +03001627/points/323fc7b1d2b44cb7ff2b8acf844d34d2.pts 03001627/expert_verified/points_label/323fc7b1d2b44cb7ff2b8acf844d34d2.seg 03001627 +03001627/points/434cee44934612a81f98c0761af40e04.pts 03001627/expert_verified/points_label/434cee44934612a81f98c0761af40e04.seg 03001627 +03636649/points/31dee666120727b0be78c8b300d2a963.pts 03636649/expert_verified/points_label/31dee666120727b0be78c8b300d2a963.seg 03636649 +02958343/points/48f5446e6ac9c1b51f1446551412bde4.pts 02958343/expert_verified/points_label/48f5446e6ac9c1b51f1446551412bde4.seg 02958343 +04379243/points/aa3eb180a4f6d8d42de421c2ab5cfb52.pts 04379243/expert_verified/points_label/aa3eb180a4f6d8d42de421c2ab5cfb52.seg 04379243 +04379243/points/14e5e4db3246dacff12d7184a2ad3430.pts 04379243/expert_verified/points_label/14e5e4db3246dacff12d7184a2ad3430.seg 04379243 +03001627/points/96c0ecd1ef80e818c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/96c0ecd1ef80e818c8687ff9b0b4e4ac.seg 03001627 +04225987/points/d4c042d11f29dffa1082f2ea630bf69e.pts 04225987/expert_verified/points_label/d4c042d11f29dffa1082f2ea630bf69e.seg 04225987 +03642806/points/7ebff305b2e93504239603972bcd2e7b.pts 03642806/expert_verified/points_label/7ebff305b2e93504239603972bcd2e7b.seg 03642806 
+03467517/points/369fc7f8d880e1b793f0194265a9746c.pts 03467517/expert_verified/points_label/369fc7f8d880e1b793f0194265a9746c.seg 03467517 +04379243/points/25f69a74efbff4d071a782a4379556c7.pts 04379243/expert_verified/points_label/25f69a74efbff4d071a782a4379556c7.seg 04379243 +04379243/points/7cd4844def36a9f5bc7589eefbdbc3c5.pts 04379243/expert_verified/points_label/7cd4844def36a9f5bc7589eefbdbc3c5.seg 04379243 +03467517/points/5852a24dde24a8ef93f0194265a9746c.pts 03467517/expert_verified/points_label/5852a24dde24a8ef93f0194265a9746c.seg 03467517 +03001627/points/df8440d8678f3a91c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/df8440d8678f3a91c8687ff9b0b4e4ac.seg 03001627 +04379243/points/49bf25ff4401946524c10ba1eb690638.pts 04379243/expert_verified/points_label/49bf25ff4401946524c10ba1eb690638.seg 04379243 +03001627/points/7eedcb6d76b8c23a9cdb421f6af95e5f.pts 03001627/expert_verified/points_label/7eedcb6d76b8c23a9cdb421f6af95e5f.seg 03001627 +03797390/points/ff1a44e1c1785d618bca309f2c51966a.pts 03797390/expert_verified/points_label/ff1a44e1c1785d618bca309f2c51966a.seg 03797390 +02958343/points/85f3dc3318f5200c8672c9b355cd2075.pts 02958343/expert_verified/points_label/85f3dc3318f5200c8672c9b355cd2075.seg 02958343 +02691156/points/c9be9f07f5ae7c375d7629390efe0a2.pts 02691156/expert_verified/points_label/c9be9f07f5ae7c375d7629390efe0a2.seg 02691156 +02691156/points/14cd2f1de7f68bf3ab550998f901c8e1.pts 02691156/expert_verified/points_label/14cd2f1de7f68bf3ab550998f901c8e1.seg 02691156 +02958343/points/81fad64b8fd8f010b17445a1c29f6d34.pts 02958343/expert_verified/points_label/81fad64b8fd8f010b17445a1c29f6d34.seg 02958343 +02958343/points/fe2ce22107693354f1cc1cb691702a23.pts 02958343/expert_verified/points_label/fe2ce22107693354f1cc1cb691702a23.seg 02958343 +02691156/points/74cbf170c5f2fb587d9c9c8a8ba32919.pts 02691156/expert_verified/points_label/74cbf170c5f2fb587d9c9c8a8ba32919.seg 02691156 +02691156/points/67dbb0de722cf5cd7a734abc5ba1db0f.pts 02691156/expert_verified/points_label/67dbb0de722cf5cd7a734abc5ba1db0f.seg 02691156 +04379243/points/fa345f8f107d93b9ba70f71694a4b74c.pts 04379243/expert_verified/points_label/fa345f8f107d93b9ba70f71694a4b74c.seg 04379243 +04379243/points/a45a7ba9a2842a55634c21965ee6bab.pts 04379243/expert_verified/points_label/a45a7ba9a2842a55634c21965ee6bab.seg 04379243 +04379243/points/8d7ac6078989980fad16260d4d73b56.pts 04379243/expert_verified/points_label/8d7ac6078989980fad16260d4d73b56.seg 04379243 +03001627/points/e803b31e2185d0405784b22e1081a3e1.pts 03001627/expert_verified/points_label/e803b31e2185d0405784b22e1081a3e1.seg 03001627 +04379243/points/aaf3aeda0f848344b87028a4b477349f.pts 04379243/expert_verified/points_label/aaf3aeda0f848344b87028a4b477349f.seg 04379243 +03636649/points/e94aab17400945413225afab722d9fd2.pts 03636649/expert_verified/points_label/e94aab17400945413225afab722d9fd2.seg 03636649 +03001627/points/d2c465e85d2e8f1fcea003eff0268278.pts 03001627/expert_verified/points_label/d2c465e85d2e8f1fcea003eff0268278.seg 03001627 +03001627/points/88376e3d3a23d263de29d28278a34a18.pts 03001627/expert_verified/points_label/88376e3d3a23d263de29d28278a34a18.seg 03001627 +04379243/points/4775e71d37374444febad4f49b26ec52.pts 04379243/expert_verified/points_label/4775e71d37374444febad4f49b26ec52.seg 04379243 +03636649/points/f12822778713f5e35b36bbc16e99b441.pts 03636649/expert_verified/points_label/f12822778713f5e35b36bbc16e99b441.seg 03636649 +03636649/points/963e6743370d5c5c9b5d51fa8cce1753.pts 
03636649/expert_verified/points_label/963e6743370d5c5c9b5d51fa8cce1753.seg 03636649 +04379243/points/13c51c08c3695a09eda47978b73f5994.pts 04379243/expert_verified/points_label/13c51c08c3695a09eda47978b73f5994.seg 04379243 +04379243/points/89827ac677337629ab610b0c94236463.pts 04379243/expert_verified/points_label/89827ac677337629ab610b0c94236463.seg 04379243 +04379243/points/89b478643e53d3d6285c99063fc6fcf8.pts 04379243/expert_verified/points_label/89b478643e53d3d6285c99063fc6fcf8.seg 04379243 +04379243/points/401cd99ace3b92fadf6cfab91d65bb91.pts 04379243/expert_verified/points_label/401cd99ace3b92fadf6cfab91d65bb91.seg 04379243 +04379243/points/74c3d551e32a1cca664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/74c3d551e32a1cca664b3b9b23ddfcbc.seg 04379243 +04379243/points/db64db160fd13a514e1a714ee619465a.pts 04379243/expert_verified/points_label/db64db160fd13a514e1a714ee619465a.seg 04379243 +03001627/points/8e664a0bcaf9d2a45ca1aaa0789db621.pts 03001627/expert_verified/points_label/8e664a0bcaf9d2a45ca1aaa0789db621.seg 03001627 +03001627/points/43897195d7f893d759c257be4c612509.pts 03001627/expert_verified/points_label/43897195d7f893d759c257be4c612509.seg 03001627 +04379243/points/e6d8569c0957e7453002761e7a3ba3bd.pts 04379243/expert_verified/points_label/e6d8569c0957e7453002761e7a3ba3bd.seg 04379243 +03636649/points/ead77648c9c7dbf8d42b9650f19dd425.pts 03636649/expert_verified/points_label/ead77648c9c7dbf8d42b9650f19dd425.seg 03636649 +03636649/points/c54d3a5a9c8a655e46407779dbd69b2d.pts 03636649/expert_verified/points_label/c54d3a5a9c8a655e46407779dbd69b2d.seg 03636649 +03001627/points/379f0efc898d7a7e9fe74a48bbc553d7.pts 03001627/expert_verified/points_label/379f0efc898d7a7e9fe74a48bbc553d7.seg 03001627 +04379243/points/c1d44782ac45d6fe3671949e4f99cc76.pts 04379243/expert_verified/points_label/c1d44782ac45d6fe3671949e4f99cc76.seg 04379243 +04379243/points/7b3b160dafe7e122d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/7b3b160dafe7e122d93768e7b9b1eabf.seg 04379243 +03001627/points/7f271ecbdeb7610d637adadafee6f182.pts 03001627/expert_verified/points_label/7f271ecbdeb7610d637adadafee6f182.seg 03001627 +02958343/points/df34c25a1e1abe9428044fe9244db50a.pts 02958343/expert_verified/points_label/df34c25a1e1abe9428044fe9244db50a.seg 02958343 +03948459/points/98c0bd351e275b3c96893524e607761d.pts 03948459/expert_verified/points_label/98c0bd351e275b3c96893524e607761d.seg 03948459 +03636649/points/b96c8cc6529167bfcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/b96c8cc6529167bfcb8d8c6d4df8143.seg 03636649 +03624134/points/a33847e9c32c1afc93ac017b81605788.pts 03624134/expert_verified/points_label/a33847e9c32c1afc93ac017b81605788.seg 03624134 +03001627/points/594d5b7f3e705a1ab3234e0da44b11e4.pts 03001627/expert_verified/points_label/594d5b7f3e705a1ab3234e0da44b11e4.seg 03001627 +03001627/points/f0f04644e071d9348ca588a3264b9f86.pts 03001627/expert_verified/points_label/f0f04644e071d9348ca588a3264b9f86.seg 03001627 +02691156/points/4bdb2c4fc6701174ca8607f540cc62ba.pts 02691156/expert_verified/points_label/4bdb2c4fc6701174ca8607f540cc62ba.seg 02691156 +03001627/points/fc2a1c4c332f7731e45ef4135c266a12.pts 03001627/expert_verified/points_label/fc2a1c4c332f7731e45ef4135c266a12.seg 03001627 +02691156/points/df68b8fb9f4531b42e690fa6dfd5d610.pts 02691156/expert_verified/points_label/df68b8fb9f4531b42e690fa6dfd5d610.seg 02691156 +03642806/points/517de75577ac6e8a42b9615216f9a30d.pts 03642806/expert_verified/points_label/517de75577ac6e8a42b9615216f9a30d.seg 03642806 
+03001627/points/74cc57ea0e2e06dbe4106b1d06dc89b3.pts 03001627/expert_verified/points_label/74cc57ea0e2e06dbe4106b1d06dc89b3.seg 03001627
+02691156/points/d72a483cf8a0cf2bbbf3143b1cb6076a.pts 02691156/expert_verified/points_label/d72a483cf8a0cf2bbbf3143b1cb6076a.seg 02691156
+03001627/points/9c7b2ed3770d1a6ea6fee8e2140acec9.pts 03001627/expert_verified/points_label/9c7b2ed3770d1a6ea6fee8e2140acec9.seg 03001627
+04379243/points/28fb9a81898f88c4ae8375def5e736d8.pts 04379243/expert_verified/points_label/28fb9a81898f88c4ae8375def5e736d8.seg 04379243
+03636649/points/c0b0d7e15d3dfab1733c22d8b8e1c33d.pts 03636649/expert_verified/points_label/c0b0d7e15d3dfab1733c22d8b8e1c33d.seg 03636649
+03001627/points/bb04dc0b336abf4b263915c09bc4854f.pts 03001627/expert_verified/points_label/bb04dc0b336abf4b263915c09bc4854f.seg 03001627
+03001627/points/6caccdad9f8d4f0a7f1cdfc0a8f38f2e.pts 03001627/expert_verified/points_label/6caccdad9f8d4f0a7f1cdfc0a8f38f2e.seg 03001627
+04379243/points/86ad91ef08c53dd77189b31b3e8c8ef3.pts 04379243/expert_verified/points_label/86ad91ef08c53dd77189b31b3e8c8ef3.seg 04379243
+03790512/points/80e717f07645a4a0b37378f3c85478b4.pts 03790512/expert_verified/points_label/80e717f07645a4a0b37378f3c85478b4.seg 03790512
+02691156/points/7d226c520a29c7705e28caa3b26a73fd.pts 02691156/expert_verified/points_label/7d226c520a29c7705e28caa3b26a73fd.seg 02691156
+04379243/points/89c095a52766ecb05d2ac47f638a4ea4.pts 04379243/expert_verified/points_label/89c095a52766ecb05d2ac47f638a4ea4.seg 04379243
+04379243/points/7b92f6facc2a27bc84cc0348a73b80c3.pts 04379243/expert_verified/points_label/7b92f6facc2a27bc84cc0348a73b80c3.seg 04379243
+04379243/points/d578287c4a9452efa9af104529ef47c3.pts 04379243/expert_verified/points_label/d578287c4a9452efa9af104529ef47c3.seg 04379243
+03636649/points/1475fe59961fc726f096eadaad23f93d.pts 03636649/expert_verified/points_label/1475fe59961fc726f096eadaad23f93d.seg 03636649
+03790512/points/7d75e8200565ffa7b37378f3c85478b4.pts 03790512/expert_verified/points_label/7d75e8200565ffa7b37378f3c85478b4.seg 03790512
+04379243/points/852826a94cce36ea9f1deb04fb8ae481.pts 04379243/expert_verified/points_label/852826a94cce36ea9f1deb04fb8ae481.seg 04379243
+03001627/points/9c50878c91aeb8126bb6bc0db07c71e8.pts 03001627/expert_verified/points_label/9c50878c91aeb8126bb6bc0db07c71e8.seg 03001627
+02691156/points/ce827e4c857d553f71d03b466c72ce41.pts 02691156/expert_verified/points_label/ce827e4c857d553f71d03b466c72ce41.seg 02691156
+03001627/points/3aab16309520fb21dc0a8cba62d9a78a.pts 03001627/expert_verified/points_label/3aab16309520fb21dc0a8cba62d9a78a.seg 03001627
+03001627/points/697cfbe6e043136b737a00f007529fbf.pts 03001627/expert_verified/points_label/697cfbe6e043136b737a00f007529fbf.seg 03001627
+04379243/points/fd7769d0eba554c53def89b32cef8e45.pts 04379243/expert_verified/points_label/fd7769d0eba554c53def89b32cef8e45.seg 04379243
+03948459/points/d7e86e0e5b1982d4bf0ab4d7096d87f2.pts 03948459/expert_verified/points_label/d7e86e0e5b1982d4bf0ab4d7096d87f2.seg 03948459
+03001627/points/70cb8d70d961ca48b04cb542e2c50eb4.pts 03001627/expert_verified/points_label/70cb8d70d961ca48b04cb542e2c50eb4.seg 03001627
+03636649/points/c26b7862f2afb7ee4b3c42e318f3affc.pts 03636649/expert_verified/points_label/c26b7862f2afb7ee4b3c42e318f3affc.seg 03636649
+03624134/points/906b20dc0a5a5022714112b147c95c8b.pts 03624134/expert_verified/points_label/906b20dc0a5a5022714112b147c95c8b.seg 03624134
+03001627/points/f5caa9b5ada31a8b3cf15c77de45986.pts 03001627/expert_verified/points_label/f5caa9b5ada31a8b3cf15c77de45986.seg 03001627
+04379243/points/6110d87def4fa88c154c6bbaeb7d331f.pts 04379243/expert_verified/points_label/6110d87def4fa88c154c6bbaeb7d331f.seg 04379243
+03642806/points/b5f6fd84a3f44ddb1aa47689117a61e1.pts 03642806/expert_verified/points_label/b5f6fd84a3f44ddb1aa47689117a61e1.seg 03642806
+03001627/points/95317d46812e4ed4df5aea2392d894b4.pts 03001627/expert_verified/points_label/95317d46812e4ed4df5aea2392d894b4.seg 03001627
+02691156/points/471ca950dbdf0c6c5f80f808704d6409.pts 02691156/expert_verified/points_label/471ca950dbdf0c6c5f80f808704d6409.seg 02691156
+04379243/points/c9f85a671d551086d61f9b2773e1d72a.pts 04379243/expert_verified/points_label/c9f85a671d551086d61f9b2773e1d72a.seg 04379243
+04379243/points/70f1b5f74faa9bda664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/70f1b5f74faa9bda664b3b9b23ddfcbc.seg 04379243
+02691156/points/9a266b3a734e374687bf26680c510802.pts 02691156/expert_verified/points_label/9a266b3a734e374687bf26680c510802.seg 02691156
+03001627/points/4c0983329afcd06f730e89ca0d2d13c3.pts 03001627/expert_verified/points_label/4c0983329afcd06f730e89ca0d2d13c3.seg 03001627
+04379243/points/a7172fa4177661f4858699aaad4acee4.pts 04379243/expert_verified/points_label/a7172fa4177661f4858699aaad4acee4.seg 04379243
+04379243/points/504d908a55f3e0c764810cc21086da42.pts 04379243/expert_verified/points_label/504d908a55f3e0c764810cc21086da42.seg 04379243
+03948459/points/7ba9f65e926d5e3e6fe695987d47043.pts 03948459/expert_verified/points_label/7ba9f65e926d5e3e6fe695987d47043.seg 03948459
+04379243/points/5b546ef5de5d10f3ecc9201d3d846bc1.pts 04379243/expert_verified/points_label/5b546ef5de5d10f3ecc9201d3d846bc1.seg 04379243
+04379243/points/80f986ae572fce791429f9a19502375a.pts 04379243/expert_verified/points_label/80f986ae572fce791429f9a19502375a.seg 04379243
+04379243/points/fd7a579772b195532de421c2ab5cfb52.pts 04379243/expert_verified/points_label/fd7a579772b195532de421c2ab5cfb52.seg 04379243
+03001627/points/e09466e9c122dbfdf51f77a6d7299806.pts 03001627/expert_verified/points_label/e09466e9c122dbfdf51f77a6d7299806.seg 03001627
+04379243/points/2a80c95b4bbcb73d87ed2480ebb0f3d2.pts 04379243/expert_verified/points_label/2a80c95b4bbcb73d87ed2480ebb0f3d2.seg 04379243
+03467517/points/e0d74618e316b0f16d9376f644442e99.pts 03467517/expert_verified/points_label/e0d74618e316b0f16d9376f644442e99.seg 03467517
+03001627/points/587ebb2aa71acfe644dd3aaee16d3f4c.pts 03001627/expert_verified/points_label/587ebb2aa71acfe644dd3aaee16d3f4c.seg 03001627
+03467517/points/10d2c216c70b788485b61f146daff2fb.pts 03467517/expert_verified/points_label/10d2c216c70b788485b61f146daff2fb.seg 03467517
+04379243/points/3c72ddd0dca19bbedcfcef693e7ec696.pts 04379243/expert_verified/points_label/3c72ddd0dca19bbedcfcef693e7ec696.seg 04379243
+03001627/points/2742c0a5e984d92fa0dcc52ca811e565.pts 03001627/expert_verified/points_label/2742c0a5e984d92fa0dcc52ca811e565.seg 03001627
+03624134/points/792f252dcb06f042dd56c1edf3f6e336.pts 03624134/expert_verified/points_label/792f252dcb06f042dd56c1edf3f6e336.seg 03624134
+02691156/points/8fa9e2e8dbed43911f32208e53f871eb.pts 02691156/expert_verified/points_label/8fa9e2e8dbed43911f32208e53f871eb.seg 02691156
+03001627/points/d4f5c3e3eab52d0a3334fb6668ccd834.pts 03001627/expert_verified/points_label/d4f5c3e3eab52d0a3334fb6668ccd834.seg 03001627
+03642806/points/520d98e360cf44ec8139dd63d55edc44.pts 03642806/expert_verified/points_label/520d98e360cf44ec8139dd63d55edc44.seg 03642806
+03467517/points/2eba922263fc1580cc010a80df5d3c87.pts 03467517/expert_verified/points_label/2eba922263fc1580cc010a80df5d3c87.seg 03467517
+04379243/points/53c11596c3fc36a8a5094cb6d104b35.pts 04379243/expert_verified/points_label/53c11596c3fc36a8a5094cb6d104b35.seg 04379243
+03467517/points/265009e163bf5c6f69da8e7f9a803d12.pts 03467517/expert_verified/points_label/265009e163bf5c6f69da8e7f9a803d12.seg 03467517
+04379243/points/fbdf9bffeb353474c3a767747b75e56.pts 04379243/expert_verified/points_label/fbdf9bffeb353474c3a767747b75e56.seg 04379243
+03636649/points/b4af7e9a7338a9a3225afab722d9fd2.pts 03636649/expert_verified/points_label/b4af7e9a7338a9a3225afab722d9fd2.seg 03636649
+03001627/points/55eeb952519ceb87c3bd24f986301745.pts 03001627/expert_verified/points_label/55eeb952519ceb87c3bd24f986301745.seg 03001627
+04379243/points/2259e09ebd0ed2befebad4f49b26ec52.pts 04379243/expert_verified/points_label/2259e09ebd0ed2befebad4f49b26ec52.seg 04379243
+04379243/points/63fedc0334f5552dbec3a71604e140e3.pts 04379243/expert_verified/points_label/63fedc0334f5552dbec3a71604e140e3.seg 04379243
+03001627/points/70ac5cb405df84575e62305d14755686.pts 03001627/expert_verified/points_label/70ac5cb405df84575e62305d14755686.seg 03001627
+03001627/points/3f41b4339ebd59c1c397356311cbeea4.pts 03001627/expert_verified/points_label/3f41b4339ebd59c1c397356311cbeea4.seg 03001627
+04379243/points/10bb44a54a12a74e4719088c8e42c6ab.pts 04379243/expert_verified/points_label/10bb44a54a12a74e4719088c8e42c6ab.seg 04379243
+04379243/points/a83cda80e5c5a0fc3719086e0b4ab8be.pts 04379243/expert_verified/points_label/a83cda80e5c5a0fc3719086e0b4ab8be.seg 04379243
+04379243/points/74983e99e7606eb114708467db3d00e2.pts 04379243/expert_verified/points_label/74983e99e7606eb114708467db3d00e2.seg 04379243
+03001627/points/e052eaa1d5bbe795ded10515704c9720.pts 03001627/expert_verified/points_label/e052eaa1d5bbe795ded10515704c9720.seg 03001627
+02691156/points/35892510dcd7cebb87bf26680c510802.pts 02691156/expert_verified/points_label/35892510dcd7cebb87bf26680c510802.seg 02691156
+03001627/points/7f73cc6c1c9121a9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/7f73cc6c1c9121a9b9f2eb77f5e247e.seg 03001627
+03001627/points/2a8554af80cfa5e719fb4103277a6b93.pts 03001627/expert_verified/points_label/2a8554af80cfa5e719fb4103277a6b93.seg 03001627
+04379243/points/f82a5f3c2a57655d825da2b9ec9c8c29.pts 04379243/expert_verified/points_label/f82a5f3c2a57655d825da2b9ec9c8c29.seg 04379243
+02691156/points/319cf93077118d19f64801ad2940cdd5.pts 02691156/expert_verified/points_label/319cf93077118d19f64801ad2940cdd5.seg 02691156
+03790512/points/5bb3597d49c58017b37378f3c85478b4.pts 03790512/expert_verified/points_label/5bb3597d49c58017b37378f3c85478b4.seg 03790512
+02958343/points/17926c1ef484b73e6758a098566bc94e.pts 02958343/expert_verified/points_label/17926c1ef484b73e6758a098566bc94e.seg 02958343
+04379243/points/345c1bb95b12ff8c013a7bed5288654.pts 04379243/expert_verified/points_label/345c1bb95b12ff8c013a7bed5288654.seg 04379243
+03001627/points/3b788994cd578990c35131da26f8061a.pts 03001627/expert_verified/points_label/3b788994cd578990c35131da26f8061a.seg 03001627
+03636649/points/c25cc72cd06852e75bbea6ee257e41cc.pts 03636649/expert_verified/points_label/c25cc72cd06852e75bbea6ee257e41cc.seg 03636649
+03001627/points/4e4570768f981ca7b95617254e8005c0.pts 03001627/expert_verified/points_label/4e4570768f981ca7b95617254e8005c0.seg 03001627
+03642806/points/ef6d92c90aeabf5becae27d182a3e41c.pts 03642806/expert_verified/points_label/ef6d92c90aeabf5becae27d182a3e41c.seg 03642806
+04379243/points/97718e2651d22b3a74740f837351e7eb.pts 04379243/expert_verified/points_label/97718e2651d22b3a74740f837351e7eb.seg 04379243
+03948459/points/1f646ff59cabdddcd810dcd63f342aca.pts 03948459/expert_verified/points_label/1f646ff59cabdddcd810dcd63f342aca.seg 03948459
+02958343/points/74f7b559d6af926012f2e446484bbaf7.pts 02958343/expert_verified/points_label/74f7b559d6af926012f2e446484bbaf7.seg 02958343
+03001627/points/8b3619396de4df10db8860d0872e9c55.pts 03001627/expert_verified/points_label/8b3619396de4df10db8860d0872e9c55.seg 03001627
+03001627/points/44ddb3d46266bb0ffebad4f49b26ec52.pts 03001627/expert_verified/points_label/44ddb3d46266bb0ffebad4f49b26ec52.seg 03001627
+03001627/points/a5f300f3975497fa9dcf2183c858e6e5.pts 03001627/expert_verified/points_label/a5f300f3975497fa9dcf2183c858e6e5.seg 03001627
+03467517/points/113b65f0e68314737c481698bd5233b4.pts 03467517/expert_verified/points_label/113b65f0e68314737c481698bd5233b4.seg 03467517
+03001627/points/49795a9ebd9a9c6d2c697f0a1454869.pts 03001627/expert_verified/points_label/49795a9ebd9a9c6d2c697f0a1454869.seg 03001627
+03001627/points/5822ae77b06bea3091da37ff8bdd2524.pts 03001627/expert_verified/points_label/5822ae77b06bea3091da37ff8bdd2524.seg 03001627
+03467517/points/15222c5926c7058cc6df7dab8e567ef6.pts 03467517/expert_verified/points_label/15222c5926c7058cc6df7dab8e567ef6.seg 03467517
+02691156/points/14d9c576d06622198f52dc705c3109b9.pts 02691156/expert_verified/points_label/14d9c576d06622198f52dc705c3109b9.seg 02691156
+04379243/points/62ae9ded861138be9d2be74cfb51ade1.pts 04379243/expert_verified/points_label/62ae9ded861138be9d2be74cfb51ade1.seg 04379243
+02958343/points/7b067be3aa39b1a124853ec273f6c1d2.pts 02958343/expert_verified/points_label/7b067be3aa39b1a124853ec273f6c1d2.seg 02958343
+03636649/points/66cf69a98ff895e2b55fde51a411949f.pts 03636649/expert_verified/points_label/66cf69a98ff895e2b55fde51a411949f.seg 03636649
+04379243/points/3253f2c59e6bd2a119fb4103277a6b93.pts 04379243/expert_verified/points_label/3253f2c59e6bd2a119fb4103277a6b93.seg 04379243
+02691156/points/fe0c4db38fb6399990b1d6deb98feec6.pts 02691156/expert_verified/points_label/fe0c4db38fb6399990b1d6deb98feec6.seg 02691156
+02691156/points/6d93492543d1087eb87697d3904b168b.pts 02691156/expert_verified/points_label/6d93492543d1087eb87697d3904b168b.seg 02691156
+03636649/points/402f7ce2b87e7d1ac066b9622c005c53.pts 03636649/expert_verified/points_label/402f7ce2b87e7d1ac066b9622c005c53.seg 03636649
+04379243/points/272a4cf3cfff3eb1e173cee47fbaa88.pts 04379243/expert_verified/points_label/272a4cf3cfff3eb1e173cee47fbaa88.seg 04379243
+02691156/points/6420a3ff5e526d59e16519c843f95ce0.pts 02691156/expert_verified/points_label/6420a3ff5e526d59e16519c843f95ce0.seg 02691156
+03001627/points/487040c5fdc68fdfe6cfc789522bfbab.pts 03001627/expert_verified/points_label/487040c5fdc68fdfe6cfc789522bfbab.seg 03001627
+04379243/points/8f48ccd17a15baf5ce01c07526cf2aa4.pts 04379243/expert_verified/points_label/8f48ccd17a15baf5ce01c07526cf2aa4.seg 04379243
+03001627/points/40e5d8e71ee3902a31358207d42bcb21.pts 03001627/expert_verified/points_label/40e5d8e71ee3902a31358207d42bcb21.seg 03001627
+03636649/points/68491d576b5d35aade8e7376ce4e111f.pts 03636649/expert_verified/points_label/68491d576b5d35aade8e7376ce4e111f.seg 03636649
+03467517/points/80aa2f0d66100844925eded29d6897b9.pts 03467517/expert_verified/points_label/80aa2f0d66100844925eded29d6897b9.seg 03467517
+03001627/points/7929676e756dcd41577b5d737869717e.pts 03001627/expert_verified/points_label/7929676e756dcd41577b5d737869717e.seg 03001627
+03001627/points/2cf7ccf97b09187fcb7547c95fbdff26.pts 03001627/expert_verified/points_label/2cf7ccf97b09187fcb7547c95fbdff26.seg 03001627
+02691156/points/e8409b544c626028a9b2becd26dc2fc1.pts 02691156/expert_verified/points_label/e8409b544c626028a9b2becd26dc2fc1.seg 02691156
+02691156/points/1e2de00cf19a0a33554ccf8c30febe7.pts 02691156/expert_verified/points_label/1e2de00cf19a0a33554ccf8c30febe7.seg 02691156
+02691156/points/8f40518bd30467151e5ae32cb9e3711f.pts 02691156/expert_verified/points_label/8f40518bd30467151e5ae32cb9e3711f.seg 02691156
+02958343/points/4f0147c8a158087a4c19dab9f2c7c52d.pts 02958343/expert_verified/points_label/4f0147c8a158087a4c19dab9f2c7c52d.seg 02958343
+03624134/points/954fb0819736737a1b9c8e2fdbfc1118.pts 03624134/expert_verified/points_label/954fb0819736737a1b9c8e2fdbfc1118.seg 03624134
+04379243/points/415a08a66b8527519f803a8da27dd9a9.pts 04379243/expert_verified/points_label/415a08a66b8527519f803a8da27dd9a9.seg 04379243
+03001627/points/4bdbecfbc925219157915a20ae9ec6b6.pts 03001627/expert_verified/points_label/4bdbecfbc925219157915a20ae9ec6b6.seg 03001627
+03624134/points/2f74196bd5cb462727c767f081f1365a.pts 03624134/expert_verified/points_label/2f74196bd5cb462727c767f081f1365a.seg 03624134
+02958343/points/b5b6b09711cbee6daa44bfa127abe4bb.pts 02958343/expert_verified/points_label/b5b6b09711cbee6daa44bfa127abe4bb.seg 02958343
+03001627/points/43e74f15a986eb626a90f735365ac29e.pts 03001627/expert_verified/points_label/43e74f15a986eb626a90f735365ac29e.seg 03001627
+03624134/points/385bb539629cd6991dd89e5fcd05911a.pts 03624134/expert_verified/points_label/385bb539629cd6991dd89e5fcd05911a.seg 03624134
+03642806/points/fdec2b8af5dd988cef56c22fd326c67.pts 03642806/expert_verified/points_label/fdec2b8af5dd988cef56c22fd326c67.seg 03642806
+02958343/points/244a8476648bd073834daea73aa18748.pts 02958343/expert_verified/points_label/244a8476648bd073834daea73aa18748.seg 02958343
+03467517/points/d91b0745e57f6508dc6782957fd2f5d2.pts 03467517/expert_verified/points_label/d91b0745e57f6508dc6782957fd2f5d2.seg 03467517
+04379243/points/83f1ff21744e71ad2690c0a5b39562ad.pts 04379243/expert_verified/points_label/83f1ff21744e71ad2690c0a5b39562ad.seg 04379243
+03001627/points/49aa713bec70ee1f1104b8f54582c707.pts 03001627/expert_verified/points_label/49aa713bec70ee1f1104b8f54582c707.seg 03001627
+03001627/points/9231ef07326eae09b04cb542e2c50eb4.pts 03001627/expert_verified/points_label/9231ef07326eae09b04cb542e2c50eb4.seg 03001627
+03642806/points/b211cfb105e9f97e6436916a86a90ed7.pts 03642806/expert_verified/points_label/b211cfb105e9f97e6436916a86a90ed7.seg 03642806
+03001627/points/fdfedb5bb8cd35374233148ffd345970.pts 03001627/expert_verified/points_label/fdfedb5bb8cd35374233148ffd345970.seg 03001627
+04379243/points/3037fac5bc67207e23fa92d98173c06f.pts 04379243/expert_verified/points_label/3037fac5bc67207e23fa92d98173c06f.seg 04379243
+04379243/points/40d0dd3fe786e120d75c27ddd792e41a.pts 04379243/expert_verified/points_label/40d0dd3fe786e120d75c27ddd792e41a.seg 04379243
+03001627/points/e6ea5e70c2f29d881e8fd793667dc14f.pts 03001627/expert_verified/points_label/e6ea5e70c2f29d881e8fd793667dc14f.seg 03001627
+04379243/points/9502eecc3a057115b129901f80d24b7b.pts 04379243/expert_verified/points_label/9502eecc3a057115b129901f80d24b7b.seg 04379243
+03001627/points/e68bb6f55e2454fac7f1f7c0570e288d.pts 03001627/expert_verified/points_label/e68bb6f55e2454fac7f1f7c0570e288d.seg 03001627
+02691156/points/9bd8d0fa75bc21c5e3375a6b96a1d765.pts 02691156/expert_verified/points_label/9bd8d0fa75bc21c5e3375a6b96a1d765.seg 02691156
+02958343/points/1714b6e57c8c4983fb1aad5dae793ff4.pts 02958343/expert_verified/points_label/1714b6e57c8c4983fb1aad5dae793ff4.seg 02958343
+02691156/points/8a84a26158da1db7668586dcfb752ad.pts 02691156/expert_verified/points_label/8a84a26158da1db7668586dcfb752ad.seg 02691156
+02691156/points/36d8c865f766e3e097872638b21438e3.pts 02691156/expert_verified/points_label/36d8c865f766e3e097872638b21438e3.seg 02691156
+03001627/points/96e8a51b1680b756e99481ddc3bbddfb.pts 03001627/expert_verified/points_label/96e8a51b1680b756e99481ddc3bbddfb.seg 03001627
+02958343/points/37ad66d0433beb633df8f4ac45647158.pts 02958343/expert_verified/points_label/37ad66d0433beb633df8f4ac45647158.seg 02958343
+04379243/points/56a57ef7c3385c9f2f38c0d2792fb5e.pts 04379243/expert_verified/points_label/56a57ef7c3385c9f2f38c0d2792fb5e.seg 04379243
+03467517/points/dbdf45cab0adbded1f260c1b356c52ce.pts 03467517/expert_verified/points_label/dbdf45cab0adbded1f260c1b356c52ce.seg 03467517
+04379243/points/868bab5194e93577858699aaad4acee4.pts 04379243/expert_verified/points_label/868bab5194e93577858699aaad4acee4.seg 04379243
+04379243/points/2bbd62449b56abee659dda512294c744.pts 04379243/expert_verified/points_label/2bbd62449b56abee659dda512294c744.seg 04379243
+04379243/points/a18aa2d20d516333daf1f22b6daf05ed.pts 04379243/expert_verified/points_label/a18aa2d20d516333daf1f22b6daf05ed.seg 04379243
+03636649/points/7a2362fbddbee9a4d197f67767b32741.pts 03636649/expert_verified/points_label/7a2362fbddbee9a4d197f67767b32741.seg 03636649
+03636649/points/f9259d31df38bd5decd204cd7180226d.pts 03636649/expert_verified/points_label/f9259d31df38bd5decd204cd7180226d.seg 03636649
+04379243/points/54e85b248576c4eb57cd80d4b17e7e11.pts 04379243/expert_verified/points_label/54e85b248576c4eb57cd80d4b17e7e11.seg 04379243
+04379243/points/1299579419252fa954b02959579aa6bb.pts 04379243/expert_verified/points_label/1299579419252fa954b02959579aa6bb.seg 04379243
+04379243/points/49ad167497a2af8c9672e39f89e4622e.pts 04379243/expert_verified/points_label/49ad167497a2af8c9672e39f89e4622e.seg 04379243
+04379243/points/55221b101eec29dc656a19d1d18fdbac.pts 04379243/expert_verified/points_label/55221b101eec29dc656a19d1d18fdbac.seg 04379243
+04379243/points/e8870f3190f6b8d4bd1025bd755a15aa.pts 04379243/expert_verified/points_label/e8870f3190f6b8d4bd1025bd755a15aa.seg 04379243
+02691156/points/9818f0b88fed05b24b0a1bcf2fb497ec.pts 02691156/expert_verified/points_label/9818f0b88fed05b24b0a1bcf2fb497ec.seg 02691156
+02691156/points/9ba460913d86466f62347b4731688b0f.pts 02691156/expert_verified/points_label/9ba460913d86466f62347b4731688b0f.seg 02691156
+04379243/points/574447022c4473d455f46d55537192b6.pts 04379243/expert_verified/points_label/574447022c4473d455f46d55537192b6.seg 04379243
+04379243/points/7b5b7bfa8580e913e2580b23e60e4674.pts 04379243/expert_verified/points_label/7b5b7bfa8580e913e2580b23e60e4674.seg 04379243
+04225987/points/48f26ddc704fec2f379c6a1d59ef7283.pts 04225987/expert_verified/points_label/48f26ddc704fec2f379c6a1d59ef7283.seg 04225987
+04379243/points/b7821e69687d767aab610b0c94236463.pts 04379243/expert_verified/points_label/b7821e69687d767aab610b0c94236463.seg 04379243
+02691156/points/e42443669339a6c1a5a118bd15e6e34f.pts 02691156/expert_verified/points_label/e42443669339a6c1a5a118bd15e6e34f.seg 02691156
+04379243/points/2444551d00693a0fab610b0c94236463.pts 04379243/expert_verified/points_label/2444551d00693a0fab610b0c94236463.seg 04379243
+03467517/points/5e452914684ea7fc398707f20de9db08.pts 03467517/expert_verified/points_label/5e452914684ea7fc398707f20de9db08.seg 03467517
+03001627/points/cc6840207c0cf55db30e42459dcb06f.pts 03001627/expert_verified/points_label/cc6840207c0cf55db30e42459dcb06f.seg 03001627
+04379243/points/9046b2e610065fe5a5d95e73eecd308a.pts 04379243/expert_verified/points_label/9046b2e610065fe5a5d95e73eecd308a.seg 04379243
+03467517/points/c651a91562b86ed8edb9371445f615ae.pts 03467517/expert_verified/points_label/c651a91562b86ed8edb9371445f615ae.seg 03467517
+03001627/points/9bb6d3d76d4f5ba94b3c42e318f3affc.pts 03001627/expert_verified/points_label/9bb6d3d76d4f5ba94b3c42e318f3affc.seg 03001627
+03001627/points/7fb336186da77367962800be79c6e52.pts 03001627/expert_verified/points_label/7fb336186da77367962800be79c6e52.seg 03001627
+04379243/points/b69b2ff85d0ec661d8f9dd7647048a0c.pts 04379243/expert_verified/points_label/b69b2ff85d0ec661d8f9dd7647048a0c.seg 04379243
+03001627/points/d2815e678f173616e6cfc789522bfbab.pts 03001627/expert_verified/points_label/d2815e678f173616e6cfc789522bfbab.seg 03001627
+03636649/points/b8350fcf08ff0b2ca950bf8f33cff658.pts 03636649/expert_verified/points_label/b8350fcf08ff0b2ca950bf8f33cff658.seg 03636649
+04379243/points/202e7b5c3ec079e299e8bf807e902261.pts 04379243/expert_verified/points_label/202e7b5c3ec079e299e8bf807e902261.seg 04379243
+03001627/points/c8938f54fecab41e77cd061c90fcdb44.pts 03001627/expert_verified/points_label/c8938f54fecab41e77cd061c90fcdb44.seg 03001627
+04379243/points/894e095c7036c8411933ffef19678834.pts 04379243/expert_verified/points_label/894e095c7036c8411933ffef19678834.seg 04379243
+03001627/points/4362e715455f42ba9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/4362e715455f42ba9b9f2eb77f5e247e.seg 03001627
+04379243/points/8963760f8bec0fee7f807d3c406ee.pts 04379243/expert_verified/points_label/8963760f8bec0fee7f807d3c406ee.seg 04379243
+03948459/points/4acb6494e3aaeb39998978df244b5bd.pts 03948459/expert_verified/points_label/4acb6494e3aaeb39998978df244b5bd.seg 03948459
+03636649/points/c1b939cc403a0662664b3b9b23ddfcbc.pts 03636649/expert_verified/points_label/c1b939cc403a0662664b3b9b23ddfcbc.seg 03636649
+04379243/points/e64876f5590e6fb7c3bd24f986301745.pts 04379243/expert_verified/points_label/e64876f5590e6fb7c3bd24f986301745.seg 04379243
+02691156/points/b8ce3803485b620b2c674305897e1782.pts 02691156/expert_verified/points_label/b8ce3803485b620b2c674305897e1782.seg 02691156
+03636649/points/a60c6cf7d4893f2ba26bf7a8fd4719ad.pts 03636649/expert_verified/points_label/a60c6cf7d4893f2ba26bf7a8fd4719ad.seg 03636649
+04379243/points/6ca66a443e651c1423500a5b036df62e.pts 04379243/expert_verified/points_label/6ca66a443e651c1423500a5b036df62e.seg 04379243
+04379243/points/51930b149cf6125373fa072a624ce947.pts 04379243/expert_verified/points_label/51930b149cf6125373fa072a624ce947.seg 04379243
+02691156/points/eb658ff31f0becea1d0f8853f6d023e3.pts 02691156/expert_verified/points_label/eb658ff31f0becea1d0f8853f6d023e3.seg 02691156
+03642806/points/3f45cde6f7a13138e256fb3794905772.pts 03642806/expert_verified/points_label/3f45cde6f7a13138e256fb3794905772.seg 03642806
+03001627/points/ea572cc193b804399c66df0f068d2a36.pts 03001627/expert_verified/points_label/ea572cc193b804399c66df0f068d2a36.seg 03001627
+03001627/points/9e0a0ad80be6df7789d2595edb5088ee.pts 03001627/expert_verified/points_label/9e0a0ad80be6df7789d2595edb5088ee.seg 03001627
+04379243/points/8eed35fd5b777acf58316b27df6c8e87.pts 04379243/expert_verified/points_label/8eed35fd5b777acf58316b27df6c8e87.seg 04379243
+03642806/points/5baaa726f51cd09b507f3bf1d3472684.pts 03642806/expert_verified/points_label/5baaa726f51cd09b507f3bf1d3472684.seg 03642806
+02691156/points/789f032dccc6092977b7d0d4764c121d.pts 02691156/expert_verified/points_label/789f032dccc6092977b7d0d4764c121d.seg 02691156
+03001627/points/9682d28e03acd2e3735013f3db728e20.pts 03001627/expert_verified/points_label/9682d28e03acd2e3735013f3db728e20.seg 03001627
+02958343/points/b50f9931670e25ef44ccce632b473b8c.pts 02958343/expert_verified/points_label/b50f9931670e25ef44ccce632b473b8c.seg 02958343
+03467517/points/d3972d599036251369da8e7f9a803d12.pts 03467517/expert_verified/points_label/d3972d599036251369da8e7f9a803d12.seg 03467517
+02691156/points/329987191cce68bfe64acd170567d820.pts 02691156/expert_verified/points_label/329987191cce68bfe64acd170567d820.seg 02691156
+03636649/points/ab3e153cd23e992b576a354bb9319732.pts 03636649/expert_verified/points_label/ab3e153cd23e992b576a354bb9319732.seg 03636649
+04379243/points/f850a69b0d308fbc19fb4103277a6b93.pts 04379243/expert_verified/points_label/f850a69b0d308fbc19fb4103277a6b93.seg 04379243
+04379243/points/1645b28322131b6258c407efcf93be6b.pts 04379243/expert_verified/points_label/1645b28322131b6258c407efcf93be6b.seg 04379243
+03001627/points/195464ae11f6bfe1cba091e036bf65ed.pts 03001627/expert_verified/points_label/195464ae11f6bfe1cba091e036bf65ed.seg 03001627
+02691156/points/edd9583988b62c90328f15e6c60d0e90.pts 02691156/expert_verified/points_label/edd9583988b62c90328f15e6c60d0e90.seg 02691156
+04225987/points/36aaae334d636ec28043db94fbc8c982.pts 04225987/expert_verified/points_label/36aaae334d636ec28043db94fbc8c982.seg 04225987
+04379243/points/c3c467718eb9b2a313f96345312df593.pts 04379243/expert_verified/points_label/c3c467718eb9b2a313f96345312df593.seg 04379243
+02691156/points/a1848a4a69b14704ca8607f540cc62ba.pts 02691156/expert_verified/points_label/a1848a4a69b14704ca8607f540cc62ba.seg 02691156
+02958343/points/c8bd4d0ac34266ffaaa232d0915adae9.pts 02958343/expert_verified/points_label/c8bd4d0ac34266ffaaa232d0915adae9.seg 02958343
+04379243/points/ad61a5bc7cba29b88cc413950b617e8f.pts 04379243/expert_verified/points_label/ad61a5bc7cba29b88cc413950b617e8f.seg 04379243
+03642806/points/466ea85bb4653ba3a715ae636b111d77.pts 03642806/expert_verified/points_label/466ea85bb4653ba3a715ae636b111d77.seg 03642806
+03001627/points/e93714e5553f63619215045784774049.pts 03001627/expert_verified/points_label/e93714e5553f63619215045784774049.seg 03001627
+03636649/points/b88c9a7aaab268fb42b08fbc749346d6.pts 03636649/expert_verified/points_label/b88c9a7aaab268fb42b08fbc749346d6.seg 03636649
+03636649/points/6ba931adfa36c7965208aab875b932bc.pts 03636649/expert_verified/points_label/6ba931adfa36c7965208aab875b932bc.seg 03636649
+03001627/points/e3479f55f5894bb3c7f1f7c0570e288d.pts 03001627/expert_verified/points_label/e3479f55f5894bb3c7f1f7c0570e288d.seg 03001627
+03467517/points/4c5288cc18896f8f352e5d4d2615db5b.pts 03467517/expert_verified/points_label/4c5288cc18896f8f352e5d4d2615db5b.seg 03467517
+03001627/points/631e102e9a689339b0ec386df15ab64f.pts 03001627/expert_verified/points_label/631e102e9a689339b0ec386df15ab64f.seg 03001627
+04379243/points/6daed91ae491c9cbe22ea6d770699e4b.pts 04379243/expert_verified/points_label/6daed91ae491c9cbe22ea6d770699e4b.seg 04379243
+03001627/points/40e73a326cf95d0361c93c4994c91bd1.pts 03001627/expert_verified/points_label/40e73a326cf95d0361c93c4994c91bd1.seg 03001627
+03467517/points/dc7708c870000008a24eeca91f583600.pts 03467517/expert_verified/points_label/dc7708c870000008a24eeca91f583600.seg 03467517
+03001627/points/1ac6531a337de85f2f7628d6bf38bcc4.pts 03001627/expert_verified/points_label/1ac6531a337de85f2f7628d6bf38bcc4.seg 03001627
+04379243/points/5191d64e9a1b9664bfdcc70dcc16baa1.pts 04379243/expert_verified/points_label/5191d64e9a1b9664bfdcc70dcc16baa1.seg 04379243
+03636649/points/c4dc0ac169c91ff29f8c3d2002c77ddb.pts 03636649/expert_verified/points_label/c4dc0ac169c91ff29f8c3d2002c77ddb.seg 03636649
+03624134/points/b8648ae17fb9937949f73a97204d432b.pts 03624134/expert_verified/points_label/b8648ae17fb9937949f73a97204d432b.seg 03624134
+04379243/points/a465210c23b0136d7afee304cce81d6f.pts 04379243/expert_verified/points_label/a465210c23b0136d7afee304cce81d6f.seg 04379243
+03001627/points/513686d6d63a1d8e577b5d737869717e.pts 03001627/expert_verified/points_label/513686d6d63a1d8e577b5d737869717e.seg 03001627
+03624134/points/bee1a473472639e25ca3862a7efa6401.pts 03624134/expert_verified/points_label/bee1a473472639e25ca3862a7efa6401.seg 03624134
+02691156/points/adb3ea03d7b954255e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/adb3ea03d7b954255e9e2656aff7dd5b.seg 02691156
+02691156/points/959f28c6724979ef9a6e43b878d5b335.pts 02691156/expert_verified/points_label/959f28c6724979ef9a6e43b878d5b335.seg 02691156
+04379243/points/dec1d2cf8a4563d36cb02543e4df83bf.pts 04379243/expert_verified/points_label/dec1d2cf8a4563d36cb02543e4df83bf.seg 04379243
+03790512/points/a9c432d1dc4034762a45a87054fa7272.pts 03790512/expert_verified/points_label/a9c432d1dc4034762a45a87054fa7272.seg 03790512
+03001627/points/1b5e876f3559c231532a8e162f399205.pts 03001627/expert_verified/points_label/1b5e876f3559c231532a8e162f399205.seg 03001627
+04379243/points/82e5309809e455d5f15fed2243deb166.pts 04379243/expert_verified/points_label/82e5309809e455d5f15fed2243deb166.seg 04379243
+03467517/points/8f1f54d337bf6ccac782e6226a4f593e.pts 03467517/expert_verified/points_label/8f1f54d337bf6ccac782e6226a4f593e.seg 03467517
+04379243/points/67d97102f9c54cc95512673aa47c7e3d.pts 04379243/expert_verified/points_label/67d97102f9c54cc95512673aa47c7e3d.seg 04379243
+02691156/points/e0cc4f538a8da2d65d3bbd70fc7759b7.pts 02691156/expert_verified/points_label/e0cc4f538a8da2d65d3bbd70fc7759b7.seg 02691156
+04379243/points/d0008b042256fb5f7ab911835312d4f1.pts 04379243/expert_verified/points_label/d0008b042256fb5f7ab911835312d4f1.seg 04379243
+03467517/points/44c05e219618a6395b3335548350bdee.pts 03467517/expert_verified/points_label/44c05e219618a6395b3335548350bdee.seg 03467517
+03001627/points/3f7808c221b01668b4d174e5c61f344.pts 03001627/expert_verified/points_label/3f7808c221b01668b4d174e5c61f344.seg 03001627
+03467517/points/51abcb617b2faf3a24eeca91f583600.pts 03467517/expert_verified/points_label/51abcb617b2faf3a24eeca91f583600.seg 03467517
+03636649/points/f38370fc4c112017a6e7138fdd58748.pts 03636649/expert_verified/points_label/f38370fc4c112017a6e7138fdd58748.seg 03636649
+03001627/points/37607ea19e352af4fffc97a61124b1a9.pts 03001627/expert_verified/points_label/37607ea19e352af4fffc97a61124b1a9.seg 03001627
+02958343/points/2cb6de89f5b6e702b626f6a649199824.pts 02958343/expert_verified/points_label/2cb6de89f5b6e702b626f6a649199824.seg 02958343
+04099429/points/d781243cc1d1d2e91a0ec553feb1c2c3.pts 04099429/expert_verified/points_label/d781243cc1d1d2e91a0ec553feb1c2c3.seg 04099429
+04379243/points/900afcc9f0f5fbfd858699aaad4acee4.pts 04379243/expert_verified/points_label/900afcc9f0f5fbfd858699aaad4acee4.seg 04379243
+03001627/points/d13eb19745344ae5fb0eb7e753c06942.pts 03001627/expert_verified/points_label/d13eb19745344ae5fb0eb7e753c06942.seg 03001627
+02958343/points/5785192c95cdd67b704715417c0f83c1.pts 02958343/expert_verified/points_label/5785192c95cdd67b704715417c0f83c1.seg 02958343
+03001627/points/5bb5b15807158f71504721639e19f609.pts 03001627/expert_verified/points_label/5bb5b15807158f71504721639e19f609.seg 03001627
+03636649/points/ba05f660341b7b7b70be09f44cb2fef5.pts 03636649/expert_verified/points_label/ba05f660341b7b7b70be09f44cb2fef5.seg 03636649
+02691156/points/97066012fbca5983c74417871493eae8.pts 02691156/expert_verified/points_label/97066012fbca5983c74417871493eae8.seg 02691156
+03001627/points/4499729e53c858ae71a782a4379556c7.pts 03001627/expert_verified/points_label/4499729e53c858ae71a782a4379556c7.seg 03001627
+04379243/points/41d280b7db61ebddfebad4f49b26ec52.pts 04379243/expert_verified/points_label/41d280b7db61ebddfebad4f49b26ec52.seg 04379243
+02773838/points/30bf69aa24dbb3fc9de193e488fc4dce.pts 02773838/expert_verified/points_label/30bf69aa24dbb3fc9de193e488fc4dce.seg 02773838
+03467517/points/6c9a9c0e2af9d5b35f713e773d664ec2.pts 03467517/expert_verified/points_label/6c9a9c0e2af9d5b35f713e773d664ec2.seg 03467517
+04379243/points/f979c7a650d29ea819fb4103277a6b93.pts 04379243/expert_verified/points_label/f979c7a650d29ea819fb4103277a6b93.seg 04379243
+03001627/points/b631b78c2dcc748cba5342d638d0c267.pts 03001627/expert_verified/points_label/b631b78c2dcc748cba5342d638d0c267.seg 03001627
+03467517/points/d2ad57f36e00c602baba3b7560fe62f4.pts 03467517/expert_verified/points_label/d2ad57f36e00c602baba3b7560fe62f4.seg 03467517
+04379243/points/5771d5a3084b3ca3a2d7b309863cb1b.pts 04379243/expert_verified/points_label/5771d5a3084b3ca3a2d7b309863cb1b.seg 04379243
+03636649/points/2d638c6b6b2feb9248da169d95204ce2.pts 03636649/expert_verified/points_label/2d638c6b6b2feb9248da169d95204ce2.seg 03636649
+02958343/points/63a4e46bbbd855fc2b63d3b2a8c4e8b.pts 02958343/expert_verified/points_label/63a4e46bbbd855fc2b63d3b2a8c4e8b.seg 02958343
+04379243/points/8c67fd5a15e8d9defebad4f49b26ec52.pts 04379243/expert_verified/points_label/8c67fd5a15e8d9defebad4f49b26ec52.seg 04379243
+03467517/points/28c3903b29f6b38363e148e250c0340d.pts 03467517/expert_verified/points_label/28c3903b29f6b38363e148e250c0340d.seg 03467517
+04379243/points/ab2967188299bea54cb0654f4cfa9684.pts 04379243/expert_verified/points_label/ab2967188299bea54cb0654f4cfa9684.seg 04379243
+02691156/points/a9a7f21271b3efbaf446f92b52bbd82a.pts 02691156/expert_verified/points_label/a9a7f21271b3efbaf446f92b52bbd82a.seg 02691156
+04379243/points/c3e43144fd61c56f19fb4103277a6b93.pts 04379243/expert_verified/points_label/c3e43144fd61c56f19fb4103277a6b93.seg 04379243
+03001627/points/7fcde5fc8e023dd2a6fee8e2140acec9.pts 03001627/expert_verified/points_label/7fcde5fc8e023dd2a6fee8e2140acec9.seg 03001627
+03790512/points/70d9cc5115bfedeeab548456bc75847f.pts 03790512/expert_verified/points_label/70d9cc5115bfedeeab548456bc75847f.seg 03790512
+03001627/points/3c0dd3719baecf3319fb4103277a6b93.pts 03001627/expert_verified/points_label/3c0dd3719baecf3319fb4103277a6b93.seg 03001627
+03636649/points/55077c2175d97b8889ab11a408196888.pts 03636649/expert_verified/points_label/55077c2175d97b8889ab11a408196888.seg 03636649
+04379243/points/71fc8c7cdb48978282fa4d4f2c19b2ce.pts 04379243/expert_verified/points_label/71fc8c7cdb48978282fa4d4f2c19b2ce.seg 04379243
+04379243/points/f0d5eefef970fa4b9f2349486c570dd4.pts 04379243/expert_verified/points_label/f0d5eefef970fa4b9f2349486c570dd4.seg 04379243
+03642806/points/90c01fd78513bb99c9b20aa1b8066c46.pts 03642806/expert_verified/points_label/90c01fd78513bb99c9b20aa1b8066c46.seg 03642806
+04379243/points/ca6c07357ba5125b8e2adb29857f8a1.pts 04379243/expert_verified/points_label/ca6c07357ba5125b8e2adb29857f8a1.seg 04379243
+04379243/points/634bcd3197e337aafe4e4de1adda2150.pts 04379243/expert_verified/points_label/634bcd3197e337aafe4e4de1adda2150.seg 04379243
+04379243/points/7b411de42d4960eb6e25f3efedf6785f.pts 04379243/expert_verified/points_label/7b411de42d4960eb6e25f3efedf6785f.seg 04379243
+04379243/points/878414eb6e86494d9a8ef44e1d2c5b75.pts 04379243/expert_verified/points_label/878414eb6e86494d9a8ef44e1d2c5b75.seg 04379243
+03001627/points/f3fa7bd00b76f6a87a8a6b9421844d96.pts 03001627/expert_verified/points_label/f3fa7bd00b76f6a87a8a6b9421844d96.seg 03001627
+03467517/points/a2c1ee6a7ddb50a493f0194265a9746c.pts 03467517/expert_verified/points_label/a2c1ee6a7ddb50a493f0194265a9746c.seg 03467517
+04379243/points/25bc205f6de491f4ccde40b1205ec7ff.pts 04379243/expert_verified/points_label/25bc205f6de491f4ccde40b1205ec7ff.seg 04379243
+03636649/points/771d4def2e44bc169eb34048e600e1ea.pts 03636649/expert_verified/points_label/771d4def2e44bc169eb34048e600e1ea.seg 03636649
+03624134/points/6ebe2a22b8d9d70862a95b942081dfee.pts 03624134/expert_verified/points_label/6ebe2a22b8d9d70862a95b942081dfee.seg 03624134
+02691156/points/9b1fc3881a5335cb44012f72ba1e15a8.pts 02691156/expert_verified/points_label/9b1fc3881a5335cb44012f72ba1e15a8.seg 02691156
+03001627/points/3dc252fd90d82b18c9be65dfbd21428b.pts 03001627/expert_verified/points_label/3dc252fd90d82b18c9be65dfbd21428b.seg 03001627
+04379243/points/f6f180c3e72caacb5077539b37310c29.pts 04379243/expert_verified/points_label/f6f180c3e72caacb5077539b37310c29.seg 04379243
+03642806/points/25bc168b214b54799e28e9cf32e5157.pts 03642806/expert_verified/points_label/25bc168b214b54799e28e9cf32e5157.seg 03642806
+04379243/points/ac9fae8af57729945eee45c00c4de9d3.pts 04379243/expert_verified/points_label/ac9fae8af57729945eee45c00c4de9d3.seg 04379243
+03001627/points/e8126f9e2d106620d2f33aaf794b5932.pts 03001627/expert_verified/points_label/e8126f9e2d106620d2f33aaf794b5932.seg 03001627
+03624134/points/3dc5a6d79ed591bda709dec9a148b2fe.pts 03624134/expert_verified/points_label/3dc5a6d79ed591bda709dec9a148b2fe.seg 03624134
+04379243/points/8f73278956fecb80327289c00b6dc9ca.pts 04379243/expert_verified/points_label/8f73278956fecb80327289c00b6dc9ca.seg 04379243
+03948459/points/5f46578efd2c65e5d4ac2f5fcaa742ac.pts 03948459/expert_verified/points_label/5f46578efd2c65e5d4ac2f5fcaa742ac.seg 03948459
+03624134/points/a05ea45d396c86784e52b614e584a543.pts 03624134/expert_verified/points_label/a05ea45d396c86784e52b614e584a543.seg 03624134
+03001627/points/cd939609247df917d9d3572bbd9cf789.pts 03001627/expert_verified/points_label/cd939609247df917d9d3572bbd9cf789.seg 03001627
+03261776/points/17c9866b42ae1831df4cfe396cee719e.pts 03261776/expert_verified/points_label/17c9866b42ae1831df4cfe396cee719e.seg 03261776
+03797390/points/3d3e993f7baa4d7ef1ff24a8b1564a36.pts 03797390/expert_verified/points_label/3d3e993f7baa4d7ef1ff24a8b1564a36.seg 03797390
+03467517/points/36b49aff54f6d7e893f0194265a9746c.pts 03467517/expert_verified/points_label/36b49aff54f6d7e893f0194265a9746c.seg 03467517
+02691156/points/48df2496242053da4ee0fb6a51564c3.pts 02691156/expert_verified/points_label/48df2496242053da4ee0fb6a51564c3.seg 02691156
+04379243/points/7ad23def902ea4f37b7a2c2624e46d0a.pts 04379243/expert_verified/points_label/7ad23def902ea4f37b7a2c2624e46d0a.seg 04379243
+04379243/points/1a8fe5baa2d4b5f7ee84261b3d20656.pts 04379243/expert_verified/points_label/1a8fe5baa2d4b5f7ee84261b3d20656.seg 04379243
+03467517/points/d685415d4fcd3205a24eeca91f583600.pts 03467517/expert_verified/points_label/d685415d4fcd3205a24eeca91f583600.seg 03467517
+02958343/points/8e308d28d463427f43f0e92e826556b8.pts 02958343/expert_verified/points_label/8e308d28d463427f43f0e92e826556b8.seg 02958343
+04379243/points/dc68436ab1a576f6573d2c9ac4b23fdf.pts 04379243/expert_verified/points_label/dc68436ab1a576f6573d2c9ac4b23fdf.seg 04379243
+04379243/points/1a153612bcdab3e23cc149415a408229.pts 04379243/expert_verified/points_label/1a153612bcdab3e23cc149415a408229.seg 04379243
+03001627/points/19ce953da9aa8065d747a43c11e738e9.pts 03001627/expert_verified/points_label/19ce953da9aa8065d747a43c11e738e9.seg 03001627
+04379243/points/db2d4f781756e687d8864caa856253b.pts 04379243/expert_verified/points_label/db2d4f781756e687d8864caa856253b.seg 04379243
+04379243/points/d8f851bbc98dccc23fa92d98173c06f.pts 04379243/expert_verified/points_label/d8f851bbc98dccc23fa92d98173c06f.seg 04379243
+03467517/points/e585e31db7568c4cf0e1c0df18936d05.pts 03467517/expert_verified/points_label/e585e31db7568c4cf0e1c0df18936d05.seg 03467517
+03001627/points/98ac0106ad244505e04fc3fcc1c852e0.pts 03001627/expert_verified/points_label/98ac0106ad244505e04fc3fcc1c852e0.seg 03001627
+03001627/points/1b81441b7e597235d61420a53a0cb96d.pts 03001627/expert_verified/points_label/1b81441b7e597235d61420a53a0cb96d.seg 03001627
+03001627/points/918145be863f7aeaf050758b903e6054.pts 03001627/expert_verified/points_label/918145be863f7aeaf050758b903e6054.seg 03001627
+02691156/points/1af4b32eafffb0f7ee60c37cbf99c1c.pts 02691156/expert_verified/points_label/1af4b32eafffb0f7ee60c37cbf99c1c.seg 02691156
+03636649/points/f4e1a4032b1686cec35131da26f8061a.pts 03636649/expert_verified/points_label/f4e1a4032b1686cec35131da26f8061a.seg 03636649
+04379243/points/9c4dfafdbd7f9b76c955e5ed03ef3a2f.pts 04379243/expert_verified/points_label/9c4dfafdbd7f9b76c955e5ed03ef3a2f.seg 04379243
+02691156/points/80b8f4da6b77eb66d208f79049825a82.pts 02691156/expert_verified/points_label/80b8f4da6b77eb66d208f79049825a82.seg 02691156
+03642806/points/de2e95eac460c361e862e3cac45aa769.pts 03642806/expert_verified/points_label/de2e95eac460c361e862e3cac45aa769.seg 03642806
+04379243/points/e2571e4eba2d9f5eab610b0c94236463.pts 04379243/expert_verified/points_label/e2571e4eba2d9f5eab610b0c94236463.seg 04379243
+04379243/points/a0445e4888d56666b9d7c2fc41e80228.pts 04379243/expert_verified/points_label/a0445e4888d56666b9d7c2fc41e80228.seg 04379243
+03001627/points/873c017f35957717b56a13a4b2372aa4.pts 03001627/expert_verified/points_label/873c017f35957717b56a13a4b2372aa4.seg 03001627
+03001627/points/3af90da238ac4ddbf91663a74ccd2338.pts 03001627/expert_verified/points_label/3af90da238ac4ddbf91663a74ccd2338.seg 03001627
+02958343/points/9698be0fd3516f01fbeda5389ab05f5f.pts 02958343/expert_verified/points_label/9698be0fd3516f01fbeda5389ab05f5f.seg 02958343
+03790512/points/655b9dd9425cc3a12a45a87054fa7272.pts 03790512/expert_verified/points_label/655b9dd9425cc3a12a45a87054fa7272.seg 03790512
+04379243/points/ec1c92efffb9ee78beedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/ec1c92efffb9ee78beedb4c8fd29e2d1.seg 04379243
+04379243/points/3b7fc97192e483ebb0bf045ee98272fc.pts 04379243/expert_verified/points_label/3b7fc97192e483ebb0bf045ee98272fc.seg 04379243
+03467517/points/8c3d3e69d03d3443e84e459fb01822f.pts 03467517/expert_verified/points_label/8c3d3e69d03d3443e84e459fb01822f.seg 03467517
+02691156/points/e0058b4948f87d3b87697d3904b168b.pts 02691156/expert_verified/points_label/e0058b4948f87d3b87697d3904b168b.seg 02691156
+03001627/points/4428b7dc4b6696812905b6e26038a78.pts 03001627/expert_verified/points_label/4428b7dc4b6696812905b6e26038a78.seg 03001627
+03636649/points/f7093dd024fd09fc7219d6d5c4afbaff.pts 03636649/expert_verified/points_label/f7093dd024fd09fc7219d6d5c4afbaff.seg 03636649
+04379243/points/7d0c5e28089c2b7bd99e852ee772dfa4.pts 04379243/expert_verified/points_label/7d0c5e28089c2b7bd99e852ee772dfa4.seg 04379243
+03636649/points/4916f793d87dd184d42b9650f19dd425.pts 03636649/expert_verified/points_label/4916f793d87dd184d42b9650f19dd425.seg 03636649
+04379243/points/1ffcbc064f473b7de7c13848b2d8f5ec.pts 04379243/expert_verified/points_label/1ffcbc064f473b7de7c13848b2d8f5ec.seg 04379243
+03636649/points/e180510d07b65fff571108a6d1e94edd.pts 03636649/expert_verified/points_label/e180510d07b65fff571108a6d1e94edd.seg 03636649
+03636649/points/d9f6bd064c9fd456fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/d9f6bd064c9fd456fcb8d8c6d4df8143.seg 03636649
+04379243/points/ec81c49ee12e8a70fd06de9ba37d44bd.pts 04379243/expert_verified/points_label/ec81c49ee12e8a70fd06de9ba37d44bd.seg 04379243
+03636649/points/4a868756ae6404a5c0bc57897eddf6f.pts 03636649/expert_verified/points_label/4a868756ae6404a5c0bc57897eddf6f.seg 03636649
+02958343/points/9c827e532de4967285089a13cc567dbd.pts 02958343/expert_verified/points_label/9c827e532de4967285089a13cc567dbd.seg 02958343
+03797390/points/1c9f9e25c654cbca3c71bf3f4dd78475.pts 03797390/expert_verified/points_label/1c9f9e25c654cbca3c71bf3f4dd78475.seg 03797390
+03001627/points/ca3670f77268f899febad4f49b26ec52.pts 03001627/expert_verified/points_label/ca3670f77268f899febad4f49b26ec52.seg 03001627
+04379243/points/9b8e6eb835f0c8bcf37af16b2893f1d4.pts 04379243/expert_verified/points_label/9b8e6eb835f0c8bcf37af16b2893f1d4.seg 04379243
+03001627/points/5c9d582488732ee0d7f7a4c4609b0913.pts 03001627/expert_verified/points_label/5c9d582488732ee0d7f7a4c4609b0913.seg 03001627
+04379243/points/684ccc0f629ee45cab610b0c94236463.pts 04379243/expert_verified/points_label/684ccc0f629ee45cab610b0c94236463.seg 04379243
+03001627/points/4913388a4c94547a81806e3880250dff.pts 03001627/expert_verified/points_label/4913388a4c94547a81806e3880250dff.seg 03001627
+03636649/points/73378b714c5bfed2b922d818b19db1e.pts 03636649/expert_verified/points_label/73378b714c5bfed2b922d818b19db1e.seg 03636649
+03001627/points/4a89a789f817ab5414038d588fd1342f.pts 03001627/expert_verified/points_label/4a89a789f817ab5414038d588fd1342f.seg 03001627
+04379243/points/df7761a3b4ac638c9eaceb124b71b7be.pts 04379243/expert_verified/points_label/df7761a3b4ac638c9eaceb124b71b7be.seg 04379243
+03001627/points/46557f689f4cf5dd2acd2bb6205825cb.pts 03001627/expert_verified/points_label/46557f689f4cf5dd2acd2bb6205825cb.seg 03001627
+04379243/points/2db1f557e247ded7e907b6d9dc1d71b7.pts 04379243/expert_verified/points_label/2db1f557e247ded7e907b6d9dc1d71b7.seg 04379243
+04379243/points/b69d9e876e7a80a29f2349486c570dd4.pts 04379243/expert_verified/points_label/b69d9e876e7a80a29f2349486c570dd4.seg 04379243
+04379243/points/a94ea7183f27073248c0c0980e363341.pts 04379243/expert_verified/points_label/a94ea7183f27073248c0c0980e363341.seg 04379243
+03636649/points/8f85c2195890ccf671f0940f5ed452dc.pts 03636649/expert_verified/points_label/8f85c2195890ccf671f0940f5ed452dc.seg 03636649
+02691156/points/cc80380c511ec8e2c91a9d486db717.pts 02691156/expert_verified/points_label/cc80380c511ec8e2c91a9d486db717.seg 02691156
+03642806/points/6b61ef17b4f45050b598e8984f11eb0c.pts 03642806/expert_verified/points_label/6b61ef17b4f45050b598e8984f11eb0c.seg 03642806
+04379243/points/d9ce0b512e0420f8be95ff480950e9ef.pts 04379243/expert_verified/points_label/d9ce0b512e0420f8be95ff480950e9ef.seg 04379243
+04379243/points/c27a1c6a26642c907ecc778b34d42f32.pts 04379243/expert_verified/points_label/c27a1c6a26642c907ecc778b34d42f32.seg 04379243
+04379243/points/debd06d3176a5b728cbb8bac2032149c.pts 04379243/expert_verified/points_label/debd06d3176a5b728cbb8bac2032149c.seg 04379243
+04099429/points/fa07813a89527d195d1df55cbe0874aa.pts 04099429/expert_verified/points_label/fa07813a89527d195d1df55cbe0874aa.seg 04099429
+03001627/points/2a98a638f675f46e7d44dc16af152638.pts 03001627/expert_verified/points_label/2a98a638f675f46e7d44dc16af152638.seg 03001627
+03624134/points/ec1eb959cc203f1de5a365227cfe63ec.pts 03624134/expert_verified/points_label/ec1eb959cc203f1de5a365227cfe63ec.seg 03624134
+04379243/points/db0c430a51ac45c19d2be74cfb51ade1.pts 04379243/expert_verified/points_label/db0c430a51ac45c19d2be74cfb51ade1.seg 04379243
+04379243/points/26b2a15646f6a3a06f1e07a56c129dfc.pts 04379243/expert_verified/points_label/26b2a15646f6a3a06f1e07a56c129dfc.seg 04379243
+04379243/points/90343e416528b576f41d9ea5f63b1b05.pts 04379243/expert_verified/points_label/90343e416528b576f41d9ea5f63b1b05.seg 04379243
+03001627/points/43d38ad2f5d103adf9b9977a2406713a.pts 03001627/expert_verified/points_label/43d38ad2f5d103adf9b9977a2406713a.seg 03001627
+03001627/points/e279758e8a5b6a8d492d9da2668ec34c.pts 03001627/expert_verified/points_label/e279758e8a5b6a8d492d9da2668ec34c.seg 03001627
+03642806/points/71907a4a567dce3bb0de1e7a6809fd90.pts 03642806/expert_verified/points_label/71907a4a567dce3bb0de1e7a6809fd90.seg 03642806
+03636649/points/2958cd9fd799bf02cfbcbf340cec6da1.pts 03636649/expert_verified/points_label/2958cd9fd799bf02cfbcbf340cec6da1.seg 03636649
+04379243/points/bd7c71ca15b0d4e56c252f74b6220e29.pts 04379243/expert_verified/points_label/bd7c71ca15b0d4e56c252f74b6220e29.seg 04379243
+04379243/points/51c6a7298408c3f19730cb37c9a5f63b.pts 04379243/expert_verified/points_label/51c6a7298408c3f19730cb37c9a5f63b.seg 04379243
+02691156/points/e3de366a0cfb59ed38294c37c250d7cd.pts 02691156/expert_verified/points_label/e3de366a0cfb59ed38294c37c250d7cd.seg 02691156
+03467517/points/f288cd2146b8f4c1f0e1c0df18936d05.pts 03467517/expert_verified/points_label/f288cd2146b8f4c1f0e1c0df18936d05.seg 03467517
+04379243/points/270430ab9efb9d85c0f947750540fb22.pts 04379243/expert_verified/points_label/270430ab9efb9d85c0f947750540fb22.seg 04379243
+04379243/points/f5ad10e6a938aa80e85c7a030ebdf69a.pts 04379243/expert_verified/points_label/f5ad10e6a938aa80e85c7a030ebdf69a.seg 04379243
+04379243/points/8343d98e3710f5bee1b32bbe69d5bc15.pts 04379243/expert_verified/points_label/8343d98e3710f5bee1b32bbe69d5bc15.seg 04379243
+03790512/points/40b7a63fd9ede0cf48272812609617e2.pts 03790512/expert_verified/points_label/40b7a63fd9ede0cf48272812609617e2.seg 03790512
+03467517/points/16bc13ee237ebeb38460585fe283a1c9.pts 03467517/expert_verified/points_label/16bc13ee237ebeb38460585fe283a1c9.seg 03467517
+02691156/points/a56143efe74ee89ebbf3143b1cb6076a.pts 02691156/expert_verified/points_label/a56143efe74ee89ebbf3143b1cb6076a.seg 02691156
+04379243/points/9a6ab25d91c92a5a35acfdef2ece21c0.pts 04379243/expert_verified/points_label/9a6ab25d91c92a5a35acfdef2ece21c0.seg 04379243
+03467517/points/c9b60abdc17708fb78ad94b294a9faa6.pts 03467517/expert_verified/points_label/c9b60abdc17708fb78ad94b294a9faa6.seg 03467517
+04379243/points/cde67434193a2a6f19fb4103277a6b93.pts 04379243/expert_verified/points_label/cde67434193a2a6f19fb4103277a6b93.seg 04379243
+04379243/points/6b62c85b16e300557005dacb6907e37d.pts 04379243/expert_verified/points_label/6b62c85b16e300557005dacb6907e37d.seg 04379243
+04379243/points/7956ac7aba6295d1c2fd07f66cbad0f7.pts 04379243/expert_verified/points_label/7956ac7aba6295d1c2fd07f66cbad0f7.seg 04379243
+04379243/points/dcda90e411cb4e35506d1e1cc84da713.pts 04379243/expert_verified/points_label/dcda90e411cb4e35506d1e1cc84da713.seg 04379243
+02691156/points/c494f446954523a8a32748a9f843a0bf.pts 02691156/expert_verified/points_label/c494f446954523a8a32748a9f843a0bf.seg 02691156
+02691156/points/18e6f319062ccb49ca8607f540cc62ba.pts 02691156/expert_verified/points_label/18e6f319062ccb49ca8607f540cc62ba.seg 02691156
+04379243/points/b7cead95e18b570d2c97486f63c12d76.pts 04379243/expert_verified/points_label/b7cead95e18b570d2c97486f63c12d76.seg 04379243
+03948459/points/f6d52684720d52a01ab78426351eea4a.pts 03948459/expert_verified/points_label/f6d52684720d52a01ab78426351eea4a.seg 03948459
+04379243/points/7eeceefed2b3aa2794f3bda96cf548cc.pts 04379243/expert_verified/points_label/7eeceefed2b3aa2794f3bda96cf548cc.seg 04379243
+03001627/points/5eaa2730f10054d0f6cabe1df6f4c9d9.pts 03001627/expert_verified/points_label/5eaa2730f10054d0f6cabe1df6f4c9d9.seg 03001627
+03001627/points/92f79b8e45269847f0efa341b439d741.pts 03001627/expert_verified/points_label/92f79b8e45269847f0efa341b439d741.seg 03001627
+03001627/points/cbaca6a6edfa2d512b520984c067934c.pts 03001627/expert_verified/points_label/cbaca6a6edfa2d512b520984c067934c.seg 03001627
+04379243/points/390e0db80fe12ef65fa6da97b9eb4a2f.pts 04379243/expert_verified/points_label/390e0db80fe12ef65fa6da97b9eb4a2f.seg 04379243
+04379243/points/2ec33e8b457ac0fa278d386bfa54545.pts 04379243/expert_verified/points_label/2ec33e8b457ac0fa278d386bfa54545.seg 04379243
+04225987/points/ac2b6924a60a7a87aa4f69d519551495.pts 04225987/expert_verified/points_label/ac2b6924a60a7a87aa4f69d519551495.seg 04225987
+02958343/points/468780ef4ace9a422e877e82c90c24d.pts 02958343/expert_verified/points_label/468780ef4ace9a422e877e82c90c24d.seg 02958343
+03001627/points/78c9204b2eac432b65b77a565916c7f.pts 03001627/expert_verified/points_label/78c9204b2eac432b65b77a565916c7f.seg 03001627
+04379243/points/b278b58e294a7d2bac242c3aebc81b2f.pts 04379243/expert_verified/points_label/b278b58e294a7d2bac242c3aebc81b2f.seg 04379243
+04379243/points/fc95d34ab1afb92b9118eee0b123125f.pts 04379243/expert_verified/points_label/fc95d34ab1afb92b9118eee0b123125f.seg 04379243
+03790512/points/54f016b47a5864cd5dde04c96fd8146.pts 03790512/expert_verified/points_label/54f016b47a5864cd5dde04c96fd8146.seg 03790512
+04379243/points/9afa121e3aec8bd7c387f328a37d8ece.pts 04379243/expert_verified/points_label/9afa121e3aec8bd7c387f328a37d8ece.seg 04379243
+04379243/points/382889dbc86b5dd919fb4103277a6b93.pts 04379243/expert_verified/points_label/382889dbc86b5dd919fb4103277a6b93.seg 04379243
+03467517/points/b83a81b2476ec59e59610f6f40382499.pts 03467517/expert_verified/points_label/b83a81b2476ec59e59610f6f40382499.seg 03467517
+03001627/points/5d959b0f79a22e8c67c9124d122355ab.pts 03001627/expert_verified/points_label/5d959b0f79a22e8c67c9124d122355ab.seg 03001627
+02691156/points/c4111dbb21e1f17043afdb9c81ff2967.pts 02691156/expert_verified/points_label/c4111dbb21e1f17043afdb9c81ff2967.seg 02691156
+02691156/points/46829981c5c25285bfc0a2c490b4c222.pts 02691156/expert_verified/points_label/46829981c5c25285bfc0a2c490b4c222.seg 02691156
+04379243/points/497659c4723fbc4fe90ff84c89de437.pts 04379243/expert_verified/points_label/497659c4723fbc4fe90ff84c89de437.seg 04379243
+02691156/points/a805c30d4b09f11f62347b4731688b0f.pts 02691156/expert_verified/points_label/a805c30d4b09f11f62347b4731688b0f.seg 02691156
+03636649/points/e485053f3e0d18252cd2160e449d45ae.pts 03636649/expert_verified/points_label/e485053f3e0d18252cd2160e449d45ae.seg 03636649
+02958343/points/2fb5fe84c28b8b35cc02882a83047172.pts 02958343/expert_verified/points_label/2fb5fe84c28b8b35cc02882a83047172.seg 02958343
+03636649/points/f7a4590c54e2ac7ce62fad6b4f42c880.pts 03636649/expert_verified/points_label/f7a4590c54e2ac7ce62fad6b4f42c880.seg 03636649
+03642806/points/9fc5b76d363ca64ed03066fc8168e9c6.pts 03642806/expert_verified/points_label/9fc5b76d363ca64ed03066fc8168e9c6.seg 03642806
+02691156/points/be080a797406422843afdb9c81ff2967.pts 02691156/expert_verified/points_label/be080a797406422843afdb9c81ff2967.seg 02691156
+04379243/points/81a84fcb2b247a3348eaa510713cb074.pts 04379243/expert_verified/points_label/81a84fcb2b247a3348eaa510713cb074.seg 04379243
+03001627/points/47c540c2e9c3483ce79a6b87656a120a.pts 03001627/expert_verified/points_label/47c540c2e9c3483ce79a6b87656a120a.seg 03001627
+03001627/points/5073d7a546b9a4d0e810eba61b778ebb.pts 03001627/expert_verified/points_label/5073d7a546b9a4d0e810eba61b778ebb.seg 03001627
+03001627/points/e4a890f2330ebd7e4a11872aa986426d.pts 03001627/expert_verified/points_label/e4a890f2330ebd7e4a11872aa986426d.seg 03001627
+03001627/points/a7200578bd7bea065dc3653f8341633a.pts 03001627/expert_verified/points_label/a7200578bd7bea065dc3653f8341633a.seg 03001627
+03467517/points/b004331ee5cc39caa24eeca91f583600.pts 03467517/expert_verified/points_label/b004331ee5cc39caa24eeca91f583600.seg 03467517
+04379243/points/f01768b8b8ba025ee45ef4135c266a12.pts 04379243/expert_verified/points_label/f01768b8b8ba025ee45ef4135c266a12.seg 04379243
+03642806/points/5173aa7f75ff3cf1b55fde51a411949f.pts 03642806/expert_verified/points_label/5173aa7f75ff3cf1b55fde51a411949f.seg 03642806
+03636649/points/e7e45a8f0b0ab311c754474f0ac106.pts 03636649/expert_verified/points_label/e7e45a8f0b0ab311c754474f0ac106.seg 03636649
+03642806/points/1b67b4bfed6688ba5b22feddf58c05e1.pts 03642806/expert_verified/points_label/1b67b4bfed6688ba5b22feddf58c05e1.seg 03642806
+03797390/points/f1e439307b834015770a0ff1161fa15a.pts 03797390/expert_verified/points_label/f1e439307b834015770a0ff1161fa15a.seg 03797390
+03001627/points/b6c9495629c00419940806ade53ef2f.pts 03001627/expert_verified/points_label/b6c9495629c00419940806ade53ef2f.seg 03001627
+03001627/points/8e19d2ec95c45186a6fd617b2ff5d2d.pts 03001627/expert_verified/points_label/8e19d2ec95c45186a6fd617b2ff5d2d.seg 03001627
+03001627/points/d7b8189fe69cebedc41b07b1627c4b43.pts 03001627/expert_verified/points_label/d7b8189fe69cebedc41b07b1627c4b43.seg 03001627
+02691156/points/a7a0e7eddf4ffb8c19378fd691582500.pts 02691156/expert_verified/points_label/a7a0e7eddf4ffb8c19378fd691582500.seg 02691156
+03001627/points/2b6cbad4ba1e9a0645881d7eab1353ba.pts 03001627/expert_verified/points_label/2b6cbad4ba1e9a0645881d7eab1353ba.seg 03001627
+04379243/points/dade0594e68e2250be6c545952e7fa4a.pts 04379243/expert_verified/points_label/dade0594e68e2250be6c545952e7fa4a.seg 04379243
+03001627/points/9850d225049f987e9b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/9850d225049f987e9b9f2eb77f5e247e.seg 03001627
+03948459/points/e9e6426605eb6d5952d52701459b1f0.pts 03948459/expert_verified/points_label/e9e6426605eb6d5952d52701459b1f0.seg 03948459
+03636649/points/e507bc77c03a1b3afcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/e507bc77c03a1b3afcb8d8c6d4df8143.seg 03636649
+03797390/points/a6d9f9ae39728831808951ff5fb582ac.pts 03797390/expert_verified/points_label/a6d9f9ae39728831808951ff5fb582ac.seg 03797390
+04379243/points/3144ba0c286cc61f490ad276cd2af3a4.pts 04379243/expert_verified/points_label/3144ba0c286cc61f490ad276cd2af3a4.seg 04379243
+04379243/points/9be565678aab11cba0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/9be565678aab11cba0ab1d82ef09f78f.seg 04379243
+04379243/points/a4b2870ce7a54b8eec11c6b035aac769.pts 04379243/expert_verified/points_label/a4b2870ce7a54b8eec11c6b035aac769.seg 04379243
+03636649/points/78b95abd1d1158ffef3a2c64cef919d0.pts 03636649/expert_verified/points_label/78b95abd1d1158ffef3a2c64cef919d0.seg 03636649
+04379243/points/2182028f013e7eb530bbd4cddd04c77b.pts 04379243/expert_verified/points_label/2182028f013e7eb530bbd4cddd04c77b.seg 04379243
+02691156/points/e00b89bc338348caa42c49797afd1f5c.pts 02691156/expert_verified/points_label/e00b89bc338348caa42c49797afd1f5c.seg 02691156
+03001627/points/9d28a066df22319cca2e16d6cd76503c.pts 03001627/expert_verified/points_label/9d28a066df22319cca2e16d6cd76503c.seg 03001627
+03636649/points/3c4d8c4ebe9dedbc2cd2160e449d45ae.pts 03636649/expert_verified/points_label/3c4d8c4ebe9dedbc2cd2160e449d45ae.seg 03636649
+02691156/points/97d662e5e6345b46bd46d022fd7d80aa.pts 02691156/expert_verified/points_label/97d662e5e6345b46bd46d022fd7d80aa.seg 02691156
+03001627/points/9dac39c51680daa2f71e06115e9c3b3e.pts 03001627/expert_verified/points_label/9dac39c51680daa2f71e06115e9c3b3e.seg 03001627
+03624134/points/1ecb37ea8f0c4abc20fc54d2500eb7f1.pts 03624134/expert_verified/points_label/1ecb37ea8f0c4abc20fc54d2500eb7f1.seg 03624134
+03624134/points/3a0f48139bfd3a4ea152d2e823b9fe06.pts 03624134/expert_verified/points_label/3a0f48139bfd3a4ea152d2e823b9fe06.seg 03624134
+04379243/points/1264d88ae599df3fbeedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/1264d88ae599df3fbeedb4c8fd29e2d1.seg 04379243
+03001627/points/97bbc8970b05c4a3fcde6bcb709edd9a.pts 03001627/expert_verified/points_label/97bbc8970b05c4a3fcde6bcb709edd9a.seg 03001627
+03636649/points/1f58b59a1b6b06df766fc93a239bada0.pts 03636649/expert_verified/points_label/1f58b59a1b6b06df766fc93a239bada0.seg 03636649
+03001627/points/eb51e814c3f44a07914ced7dab3536b9.pts 03001627/expert_verified/points_label/eb51e814c3f44a07914ced7dab3536b9.seg 03001627
+03636649/points/a138582b1d0b9cbb137af984a9f45d65.pts 03636649/expert_verified/points_label/a138582b1d0b9cbb137af984a9f45d65.seg 03636649
+03790512/points/9f9de88a95b56660b37378f3c85478b4.pts 03790512/expert_verified/points_label/9f9de88a95b56660b37378f3c85478b4.seg 03790512
+03001627/points/a521fba02ca7f9aa822215026d1e8d82.pts 03001627/expert_verified/points_label/a521fba02ca7f9aa822215026d1e8d82.seg 03001627
+04225987/points/d303055e96cd59949da15808191f1405.pts 04225987/expert_verified/points_label/d303055e96cd59949da15808191f1405.seg 04225987
+04379243/points/7e3022a7bd00eb4195b8ea6a366e14d.pts 04379243/expert_verified/points_label/7e3022a7bd00eb4195b8ea6a366e14d.seg 04379243
+02691156/points/d83300deab42c100eb9db4e832a6dd82.pts 02691156/expert_verified/points_label/d83300deab42c100eb9db4e832a6dd82.seg 02691156
+03642806/points/a4b410734514306ac401e233323032d6.pts 03642806/expert_verified/points_label/a4b410734514306ac401e233323032d6.seg 03642806
+03790512/points/532e6f88a9975a27b37378f3c85478b4.pts 03790512/expert_verified/points_label/532e6f88a9975a27b37378f3c85478b4.seg 03790512
+03642806/points/cc691d9e8e189ce47a381a112bfd785.pts 03642806/expert_verified/points_label/cc691d9e8e189ce47a381a112bfd785.seg 03642806
+02691156/points/aa07239e9397cf189601fb40d0d298b9.pts 02691156/expert_verified/points_label/aa07239e9397cf189601fb40d0d298b9.seg 02691156
+03642806/points/cc0535a34cdc7d676bf98d15712168f.pts 03642806/expert_verified/points_label/cc0535a34cdc7d676bf98d15712168f.seg 03642806
+02691156/points/ddec69970cbc4d29112a90660b187a10.pts 02691156/expert_verified/points_label/ddec69970cbc4d29112a90660b187a10.seg 02691156
+04379243/points/268e68f1819a225c1b4b790955c17432.pts 04379243/expert_verified/points_label/268e68f1819a225c1b4b790955c17432.seg 04379243
+03624134/points/1943c87f92ac76e112cad8be168fe72d.pts 03624134/expert_verified/points_label/1943c87f92ac76e112cad8be168fe72d.seg 03624134
+04379243/points/b9fc2f624533bb8119fb4103277a6b93.pts 04379243/expert_verified/points_label/b9fc2f624533bb8119fb4103277a6b93.seg 04379243
+03001627/points/1c45b266d3c879dab36dcc661f3905d.pts 03001627/expert_verified/points_label/1c45b266d3c879dab36dcc661f3905d.seg 03001627
+03948459/points/1660ef4b3f20b1e2a94b922b533051b7.pts 03948459/expert_verified/points_label/1660ef4b3f20b1e2a94b922b533051b7.seg 03948459
+02691156/points/167250e2014c72dbb87697d3904b168b.pts 02691156/expert_verified/points_label/167250e2014c72dbb87697d3904b168b.seg 02691156
+02691156/points/dfe65f8a20df11c5d1df55cbe0874aa.pts 02691156/expert_verified/points_label/dfe65f8a20df11c5d1df55cbe0874aa.seg 02691156
+03001627/points/44a2a3952ea2315ff51f77a6d7299806.pts 03001627/expert_verified/points_label/44a2a3952ea2315ff51f77a6d7299806.seg 03001627
+04379243/points/a1896691fe875eccb9968f25875bdef4.pts 04379243/expert_verified/points_label/a1896691fe875eccb9968f25875bdef4.seg 04379243
+04379243/points/6f3506c9c5202101c4e8952b27b5f370.pts 04379243/expert_verified/points_label/6f3506c9c5202101c4e8952b27b5f370.seg 04379243
+04379243/points/fead7e0c30a347b1710801cae5dc529.pts 04379243/expert_verified/points_label/fead7e0c30a347b1710801cae5dc529.seg 04379243
+04379243/points/384bf53e12744e2019fb4103277a6b93.pts 04379243/expert_verified/points_label/384bf53e12744e2019fb4103277a6b93.seg 04379243
+03001627/points/30378faa6bf5b245fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/30378faa6bf5b245fdef1c01cbd4ae0c.seg 03001627
+04379243/points/5690d17b330f73adfeb8ceb93793cb5.pts 04379243/expert_verified/points_label/5690d17b330f73adfeb8ceb93793cb5.seg 04379243
+03467517/points/2e4ec0874ea34a50812ca0ac90db1c07.pts 03467517/expert_verified/points_label/2e4ec0874ea34a50812ca0ac90db1c07.seg 03467517
+03001627/points/a007a3cd5b8ca7fb19fb4103277a6b93.pts 03001627/expert_verified/points_label/a007a3cd5b8ca7fb19fb4103277a6b93.seg 03001627
+03001627/points/bc21c95f766502a78b03575bb54dfd4.pts 03001627/expert_verified/points_label/bc21c95f766502a78b03575bb54dfd4.seg 03001627
+04379243/points/6a3ee73d42228f8581654cb17c02fd.pts 04379243/expert_verified/points_label/6a3ee73d42228f8581654cb17c02fd.seg 04379243
+04379243/points/4b399cdce8337c29285e0e27752e54a8.pts 04379243/expert_verified/points_label/4b399cdce8337c29285e0e27752e54a8.seg 04379243
+04379243/points/7f9d2da43d6aba67afb6676a5cd782b6.pts 04379243/expert_verified/points_label/7f9d2da43d6aba67afb6676a5cd782b6.seg 04379243
+03001627/points/72669be1815b2bb81e4fe86c4ad3ec90.pts 03001627/expert_verified/points_label/72669be1815b2bb81e4fe86c4ad3ec90.seg 03001627
+04379243/points/223fbcc813831d8c6e526771d2f7444e.pts 04379243/expert_verified/points_label/223fbcc813831d8c6e526771d2f7444e.seg 04379243
+02691156/points/adeb5d68e8d65cc419ba010ddb4974fe.pts 02691156/expert_verified/points_label/adeb5d68e8d65cc419ba010ddb4974fe.seg 02691156
+03001627/points/8a9d8dad6800d55ff37af16b2893f1d4.pts 03001627/expert_verified/points_label/8a9d8dad6800d55ff37af16b2893f1d4.seg 03001627
+04379243/points/db406d9b2a94bce5622d7484764b58f.pts 04379243/expert_verified/points_label/db406d9b2a94bce5622d7484764b58f.seg 04379243
+03001627/points/68b88c0be088c21d5e0096fb2d3266a.pts 03001627/expert_verified/points_label/68b88c0be088c21d5e0096fb2d3266a.seg 03001627
+03790512/points/973d75ed9c12836f3d033e6cf82ec72c.pts 03790512/expert_verified/points_label/973d75ed9c12836f3d033e6cf82ec72c.seg 03790512
+04379243/points/20292fba71362950c59c53f7df509858.pts 04379243/expert_verified/points_label/20292fba71362950c59c53f7df509858.seg 04379243
+03001627/points/21fb308ca737174e22f2f93459bd863e.pts 03001627/expert_verified/points_label/21fb308ca737174e22f2f93459bd863e.seg 03001627
+03001627/points/be9d5105e48ae27e713decb1a0563b12.pts 03001627/expert_verified/points_label/be9d5105e48ae27e713decb1a0563b12.seg 03001627
+02958343/points/c6441f127d51e478f0fb72d24c42a39.pts 02958343/expert_verified/points_label/c6441f127d51e478f0fb72d24c42a39.seg 02958343
+03001627/points/f29cbdb2c7bb10f9953d950bcd7de7a.pts 03001627/expert_verified/points_label/f29cbdb2c7bb10f9953d950bcd7de7a.seg 03001627
+02691156/points/65654b5c4e488e0c961fa14fc879444e.pts 02691156/expert_verified/points_label/65654b5c4e488e0c961fa14fc879444e.seg 02691156
+04379243/points/8654b644c766dd23d1dcc55e36186e4e.pts 04379243/expert_verified/points_label/8654b644c766dd23d1dcc55e36186e4e.seg 04379243
+04379243/points/56bb7376dfa9cb5c8cf069d506f8b5ac.pts 04379243/expert_verified/points_label/56bb7376dfa9cb5c8cf069d506f8b5ac.seg 04379243
+04379243/points/d291243cfb51ea7dcb25d116843b43a4.pts 04379243/expert_verified/points_label/d291243cfb51ea7dcb25d116843b43a4.seg 04379243
+03790512/points/49edb54e97458de8d373c34785838ee4.pts 03790512/expert_verified/points_label/49edb54e97458de8d373c34785838ee4.seg 03790512
+04379243/points/216da8313bc7b192ab610b0c94236463.pts 04379243/expert_verified/points_label/216da8313bc7b192ab610b0c94236463.seg 04379243
+03001627/points/5ac8b44ff77e5490c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/5ac8b44ff77e5490c8687ff9b0b4e4ac.seg 03001627
+03001627/points/956063d67b939431f56aa11cd5e0c3e.pts 03001627/expert_verified/points_label/956063d67b939431f56aa11cd5e0c3e.seg 03001627
+04379243/points/8dd8370dcaa8d770ea5682a3b818969a.pts 04379243/expert_verified/points_label/8dd8370dcaa8d770ea5682a3b818969a.seg 04379243
+03636649/points/3b64d5033c580d2ef76898f881b76a.pts 03636649/expert_verified/points_label/3b64d5033c580d2ef76898f881b76a.seg 03636649
+03001627/points/3d9dce1953180fe6f9c9f9697d1ec60.pts 03001627/expert_verified/points_label/3d9dce1953180fe6f9c9f9697d1ec60.seg 03001627
+03001627/points/d1b03eeb33fd441d8189e5e3786f2290.pts 03001627/expert_verified/points_label/d1b03eeb33fd441d8189e5e3786f2290.seg 03001627
+02691156/points/5294c39d2a57bd7e5cad6226edb8e82.pts 02691156/expert_verified/points_label/5294c39d2a57bd7e5cad6226edb8e82.seg 02691156
+04379243/points/7bc93a4cc26fab5c8c12b667670a35f2.pts 04379243/expert_verified/points_label/7bc93a4cc26fab5c8c12b667670a35f2.seg 04379243
+04379243/points/813d34995b5c4406b65b71636c46ae49.pts 04379243/expert_verified/points_label/813d34995b5c4406b65b71636c46ae49.seg 04379243
+03001627/points/6782b941de7b2199a344c33f76676fbd.pts 03001627/expert_verified/points_label/6782b941de7b2199a344c33f76676fbd.seg 03001627
+03636649/points/ea5ae3cfd142c3b923f93f957094a824.pts 03636649/expert_verified/points_label/ea5ae3cfd142c3b923f93f957094a824.seg 03636649
+03001627/points/47caca00f993bc4e4b3c42e318f3affc.pts 03001627/expert_verified/points_label/47caca00f993bc4e4b3c42e318f3affc.seg 03001627
+02691156/points/b702e35f4a59e81f64801ad2940cdd5.pts 02691156/expert_verified/points_label/b702e35f4a59e81f64801ad2940cdd5.seg 02691156
+03636649/points/3b5f0c01c2b914fc6f16f167d27a7dab.pts 03636649/expert_verified/points_label/3b5f0c01c2b914fc6f16f167d27a7dab.seg 03636649
+04379243/points/ad63116007d98a6d19758238d4c7aff2.pts 04379243/expert_verified/points_label/ad63116007d98a6d19758238d4c7aff2.seg 04379243
+03797390/points/8f6c86feaa74698d5c91ee20ade72edc.pts 03797390/expert_verified/points_label/8f6c86feaa74698d5c91ee20ade72edc.seg 03797390
+04379243/points/48baef3ab18d2d43d2afe8d5254a0d04.pts 04379243/expert_verified/points_label/48baef3ab18d2d43d2afe8d5254a0d04.seg 04379243
+03001627/points/fe5310a3457bf0e5c4e8952b27b5f370.pts 03001627/expert_verified/points_label/fe5310a3457bf0e5c4e8952b27b5f370.seg 03001627
+04379243/points/d4c330d27bbef3808f6610bf672cd686.pts 04379243/expert_verified/points_label/d4c330d27bbef3808f6610bf672cd686.seg 04379243
+04379243/points/adcb67b58024afb99910b7ec4c4e599b.pts 04379243/expert_verified/points_label/adcb67b58024afb99910b7ec4c4e599b.seg 04379243
+02958343/points/65d6433043c40046b82c0841410a924f.pts 02958343/expert_verified/points_label/65d6433043c40046b82c0841410a924f.seg 02958343
+04379243/points/1a00aa6b75362cc5b324368d54a7416f.pts 04379243/expert_verified/points_label/1a00aa6b75362cc5b324368d54a7416f.seg 04379243
+04379243/points/7982e2f2984978c6f4b6538438a0b930.pts 04379243/expert_verified/points_label/7982e2f2984978c6f4b6538438a0b930.seg 04379243
+03467517/points/26e1801ea747f72f14fe0da28e4f8384.pts 03467517/expert_verified/points_label/26e1801ea747f72f14fe0da28e4f8384.seg 03467517
+04379243/points/c8ee4a8b703180992985858e6f5832da.pts 04379243/expert_verified/points_label/c8ee4a8b703180992985858e6f5832da.seg 04379243
+02691156/points/f24daae76836e249f0878b58b4e887bf.pts 02691156/expert_verified/points_label/f24daae76836e249f0878b58b4e887bf.seg 02691156
+04379243/points/f29863d2fe8863d4195b8ea6a366e14d.pts 04379243/expert_verified/points_label/f29863d2fe8863d4195b8ea6a366e14d.seg 04379243
+04379243/points/babb0963a0e17bb59cd0aef0207ac8c6.pts 04379243/expert_verified/points_label/babb0963a0e17bb59cd0aef0207ac8c6.seg 04379243
+03001627/points/39911f927331db1c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/39911f927331db1c8687ff9b0b4e4ac.seg 03001627
+03001627/points/4a9d3ce54c09a2da696b74614952b2d0.pts 03001627/expert_verified/points_label/4a9d3ce54c09a2da696b74614952b2d0.seg 03001627
+03642806/points/caa4afd404f24d21275c1147a304ed86.pts 03642806/expert_verified/points_label/caa4afd404f24d21275c1147a304ed86.seg 03642806
+02691156/points/ff6e377e8e5b3757cc34b900bb2492e.pts 02691156/expert_verified/points_label/ff6e377e8e5b3757cc34b900bb2492e.seg 02691156
+03001627/points/483cfed0659965ed73c478529c40c4e6.pts 03001627/expert_verified/points_label/483cfed0659965ed73c478529c40c4e6.seg 03001627
+03797390/points/4b7888feea81219ab5f4a9188bfa0ef6.pts 03797390/expert_verified/points_label/4b7888feea81219ab5f4a9188bfa0ef6.seg 03797390
+03790512/points/40d84e407c46e8d8b31e74d456742c7.pts 03790512/expert_verified/points_label/40d84e407c46e8d8b31e74d456742c7.seg 03790512
+04379243/points/176e3b32d749ac94d79f2fc0b8d8ffad.pts 04379243/expert_verified/points_label/176e3b32d749ac94d79f2fc0b8d8ffad.seg 04379243
+03001627/points/657790bc7fd16326c132086242d50af2.pts 03001627/expert_verified/points_label/657790bc7fd16326c132086242d50af2.seg 03001627
+04379243/points/94c0ab5650ea392ddcfcef693e7ec696.pts 04379243/expert_verified/points_label/94c0ab5650ea392ddcfcef693e7ec696.seg 04379243
+03624134/points/bf5cae3922d3cb2bca7250d90eb506cf.pts 03624134/expert_verified/points_label/bf5cae3922d3cb2bca7250d90eb506cf.seg 03624134
+03001627/points/49a3b0242c13f92da6fee8e2140acec9.pts 03001627/expert_verified/points_label/49a3b0242c13f92da6fee8e2140acec9.seg 03001627
+03636649/points/e4c9bb21fe5bfeb3e21f078602e2eda8.pts 03636649/expert_verified/points_label/e4c9bb21fe5bfeb3e21f078602e2eda8.seg 03636649
+03636649/points/6595ee36783d261ed3281970e2c44dbe.pts 03636649/expert_verified/points_label/6595ee36783d261ed3281970e2c44dbe.seg 03636649
+02958343/points/9a152b11907b11074549b3c52ae0632e.pts 02958343/expert_verified/points_label/9a152b11907b11074549b3c52ae0632e.seg 02958343
+04379243/points/68a7bad2b06bc1a9d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/68a7bad2b06bc1a9d93768e7b9b1eabf.seg 04379243
+04379243/points/b9c756b2ff5d66ddfebad4f49b26ec52.pts 04379243/expert_verified/points_label/b9c756b2ff5d66ddfebad4f49b26ec52.seg 04379243
+03797390/points/2d10421716b16580e45ef4135c266a12.pts 03797390/expert_verified/points_label/2d10421716b16580e45ef4135c266a12.seg 03797390
+03001627/points/2c76aaa00e55c26836c07750784b6bc6.pts 03001627/expert_verified/points_label/2c76aaa00e55c26836c07750784b6bc6.seg 03001627
+03636649/points/5cca570916f420e64b3c42e318f3affc.pts 03636649/expert_verified/points_label/5cca570916f420e64b3c42e318f3affc.seg 03636649
+03001627/points/9225e57e34334ee019cb07ecb5b4102.pts 03001627/expert_verified/points_label/9225e57e34334ee019cb07ecb5b4102.seg 03001627
+03001627/points/17aeeadccf0e560e274b862d3a151946.pts 03001627/expert_verified/points_label/17aeeadccf0e560e274b862d3a151946.seg 03001627
+03636649/points/427806f30c61059c22e05b5d2ce39e3b.pts 03636649/expert_verified/points_label/427806f30c61059c22e05b5d2ce39e3b.seg 03636649
+03636649/points/17349d6d35aac0685ed28d6c8a1bdfe5.pts 03636649/expert_verified/points_label/17349d6d35aac0685ed28d6c8a1bdfe5.seg 03636649
+04379243/points/5ee4cbe45bdc4cd571a782a4379556c7.pts 04379243/expert_verified/points_label/5ee4cbe45bdc4cd571a782a4379556c7.seg 04379243
+03636649/points/5eda619e5f36499fc1537287b5c50d9d.pts 03636649/expert_verified/points_label/5eda619e5f36499fc1537287b5c50d9d.seg 03636649
+02691156/points/f57c74e194cd2b2bc8727b27ee96a4b7.pts
02691156/expert_verified/points_label/f57c74e194cd2b2bc8727b27ee96a4b7.seg 02691156 +02958343/points/27d42437168ccd7ddd75f724c0ccbe00.pts 02958343/expert_verified/points_label/27d42437168ccd7ddd75f724c0ccbe00.seg 02958343 +04379243/points/c8cf1c77bbb79d214719088c8e42c6ab.pts 04379243/expert_verified/points_label/c8cf1c77bbb79d214719088c8e42c6ab.seg 04379243 +04379243/points/40b48121d1879be2ee0605a41c3320d6.pts 04379243/expert_verified/points_label/40b48121d1879be2ee0605a41c3320d6.seg 04379243 +02691156/points/4f9b12d07dce21ac9d93a50cb0355558.pts 02691156/expert_verified/points_label/4f9b12d07dce21ac9d93a50cb0355558.seg 02691156 +02691156/points/25bd1569261bc545e8323edc0fe816a8.pts 02691156/expert_verified/points_label/25bd1569261bc545e8323edc0fe816a8.seg 02691156 +02691156/points/fbc429365ab7136be1a9c234926c21e2.pts 02691156/expert_verified/points_label/fbc429365ab7136be1a9c234926c21e2.seg 02691156 +04379243/points/798c315f86d8f02f931e98da3a93e73e.pts 04379243/expert_verified/points_label/798c315f86d8f02f931e98da3a93e73e.seg 04379243 +03790512/points/a0a40a9d5aabd6a7d5dde04c96fd8146.pts 03790512/expert_verified/points_label/a0a40a9d5aabd6a7d5dde04c96fd8146.seg 03790512 +04379243/points/884f15cfc6a3eea3dcfcef693e7ec696.pts 04379243/expert_verified/points_label/884f15cfc6a3eea3dcfcef693e7ec696.seg 04379243 +04379243/points/f16f939baeb7722e664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/f16f939baeb7722e664b3b9b23ddfcbc.seg 04379243 +03001627/points/1e0580f443a9e6d2593ebeeedbff73b.pts 03001627/expert_verified/points_label/1e0580f443a9e6d2593ebeeedbff73b.seg 03001627 +03636649/points/927e0654427c4d0b82241d99b4e87f38.pts 03636649/expert_verified/points_label/927e0654427c4d0b82241d99b4e87f38.seg 03636649 +03001627/points/bdd29e651e5f6fb2b079317292bdc5d4.pts 03001627/expert_verified/points_label/bdd29e651e5f6fb2b079317292bdc5d4.seg 03001627 +03642806/points/cb1e3a990782678b4b6682da890df381.pts 03642806/expert_verified/points_label/cb1e3a990782678b4b6682da890df381.seg 03642806 +03001627/points/fd5ac9b342fe518b9d3ea1c6b57a0095.pts 03001627/expert_verified/points_label/fd5ac9b342fe518b9d3ea1c6b57a0095.seg 03001627 +02958343/points/6bbcd5608ddf871a4cdd04162f008888.pts 02958343/expert_verified/points_label/6bbcd5608ddf871a4cdd04162f008888.seg 02958343 +04379243/points/76338ed3326689b249524cfd5973a145.pts 04379243/expert_verified/points_label/76338ed3326689b249524cfd5973a145.seg 04379243 +03001627/points/9a0571ae6169a6ebfebad4f49b26ec52.pts 03001627/expert_verified/points_label/9a0571ae6169a6ebfebad4f49b26ec52.seg 03001627 +03948459/points/49429e1d1e90c1ca202be79d8b285c1e.pts 03948459/expert_verified/points_label/49429e1d1e90c1ca202be79d8b285c1e.seg 03948459 +02691156/points/45a4ec99ed13ed773c2498c4c2f13ca.pts 02691156/expert_verified/points_label/45a4ec99ed13ed773c2498c4c2f13ca.seg 02691156 +04379243/points/70995336d06fc07ae9f3e9c758fef992.pts 04379243/expert_verified/points_label/70995336d06fc07ae9f3e9c758fef992.seg 04379243 +03001627/points/6fd76577d0df60669b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/6fd76577d0df60669b9f2eb77f5e247e.seg 03001627 +03001627/points/66f18d05d960ffe0bcd12732b5a4b789.pts 03001627/expert_verified/points_label/66f18d05d960ffe0bcd12732b5a4b789.seg 03001627 +03001627/points/e401be99c5a51d8bef8e9284f76f3024.pts 03001627/expert_verified/points_label/e401be99c5a51d8bef8e9284f76f3024.seg 03001627 +03001627/points/4a0b61d33846824ab1f04c301b6ccc90.pts 03001627/expert_verified/points_label/4a0b61d33846824ab1f04c301b6ccc90.seg 03001627 
+04379243/points/9a5cb4122d518111b339f790b1757e92.pts 04379243/expert_verified/points_label/9a5cb4122d518111b339f790b1757e92.seg 04379243 +04379243/points/6281381ce38aa988de98d10ab5975b59.pts 04379243/expert_verified/points_label/6281381ce38aa988de98d10ab5975b59.seg 04379243 +04379243/points/d382d9e34f365544278d386bfa54545.pts 04379243/expert_verified/points_label/d382d9e34f365544278d386bfa54545.seg 04379243 +03948459/points/6de6e56c6f7d43692866658c90231a1a.pts 03948459/expert_verified/points_label/6de6e56c6f7d43692866658c90231a1a.seg 03948459 +02691156/points/494a1698eb82572c3df325aac2f73830.pts 02691156/expert_verified/points_label/494a1698eb82572c3df325aac2f73830.seg 02691156 +02691156/points/c581942f40cbb60819ba010ddb4974fe.pts 02691156/expert_verified/points_label/c581942f40cbb60819ba010ddb4974fe.seg 02691156 +04379243/points/e9038664b7d35e6b436e6787c76ef3f0.pts 04379243/expert_verified/points_label/e9038664b7d35e6b436e6787c76ef3f0.seg 04379243 +04099429/points/56c13d294f8afb1ffb88d148e845f82e.pts 04099429/expert_verified/points_label/56c13d294f8afb1ffb88d148e845f82e.seg 04099429 +02958343/points/86fa16c6da908e6b44221994b043fd86.pts 02958343/expert_verified/points_label/86fa16c6da908e6b44221994b043fd86.seg 02958343 +04379243/points/3249c3ad90085a9e98d5fc0473d00a1c.pts 04379243/expert_verified/points_label/3249c3ad90085a9e98d5fc0473d00a1c.seg 04379243 +03636649/points/8581a3ae1f77319ac066b9622c005c53.pts 03636649/expert_verified/points_label/8581a3ae1f77319ac066b9622c005c53.seg 03636649 +03790512/points/6e1397773a4d15db429f1c522640e6f0.pts 03790512/expert_verified/points_label/6e1397773a4d15db429f1c522640e6f0.seg 03790512 +03624134/points/c1ab7029de67351cf97a65c35ea619f0.pts 03624134/expert_verified/points_label/c1ab7029de67351cf97a65c35ea619f0.seg 03624134 +04379243/points/16e874e6165e836b30bbd4cddd04c77b.pts 04379243/expert_verified/points_label/16e874e6165e836b30bbd4cddd04c77b.seg 04379243 +03636649/points/ff08713d837d87edf2098a9f7fc86999.pts 03636649/expert_verified/points_label/ff08713d837d87edf2098a9f7fc86999.seg 03636649 +03790512/points/b649be9c09e2b332429f1c522640e6f0.pts 03790512/expert_verified/points_label/b649be9c09e2b332429f1c522640e6f0.seg 03790512 +03001627/points/85b16941984902f8facfa12c7d71c89f.pts 03001627/expert_verified/points_label/85b16941984902f8facfa12c7d71c89f.seg 03001627 +04379243/points/cf1a7653c10aaa0eab610b0c94236463.pts 04379243/expert_verified/points_label/cf1a7653c10aaa0eab610b0c94236463.seg 04379243 +03001627/points/a42aa59fa23b4a4d9c0ca344f487323e.pts 03001627/expert_verified/points_label/a42aa59fa23b4a4d9c0ca344f487323e.seg 03001627 +03001627/points/3f4f1d18c61a07f134b707eb14b2a4a5.pts 03001627/expert_verified/points_label/3f4f1d18c61a07f134b707eb14b2a4a5.seg 03001627 +03001627/points/d2b9e98373e96afec8d65ca96e6b18ef.pts 03001627/expert_verified/points_label/d2b9e98373e96afec8d65ca96e6b18ef.seg 03001627 +03636649/points/71dffdee89efe07cdff00b2637ddcbde.pts 03636649/expert_verified/points_label/71dffdee89efe07cdff00b2637ddcbde.seg 03636649 +02691156/points/5ac0cd21410b2a6a341877ff7a6c751f.pts 02691156/expert_verified/points_label/5ac0cd21410b2a6a341877ff7a6c751f.seg 02691156 +03636649/points/76eb7436c40e083384d184bdc625781a.pts 03636649/expert_verified/points_label/76eb7436c40e083384d184bdc625781a.seg 03636649 +03642806/points/13330d1e7b199dd82530b9c2b65d3f86.pts 03642806/expert_verified/points_label/13330d1e7b199dd82530b9c2b65d3f86.seg 03642806 +02691156/points/e726c8e6897130439a6e43b878d5b335.pts 
02691156/expert_verified/points_label/e726c8e6897130439a6e43b878d5b335.seg 02691156 +04379243/points/40a402e1d949364a104ceb84075e40d6.pts 04379243/expert_verified/points_label/40a402e1d949364a104ceb84075e40d6.seg 04379243 +03001627/points/42140baad25c8598baa1a4ff2c45ffc9.pts 03001627/expert_verified/points_label/42140baad25c8598baa1a4ff2c45ffc9.seg 03001627 +03001627/points/5283a98b5c693e64ebefe6b1d594ad2e.pts 03001627/expert_verified/points_label/5283a98b5c693e64ebefe6b1d594ad2e.seg 03001627 +02691156/points/15898fef6fec88c53ada73811bb576de.pts 02691156/expert_verified/points_label/15898fef6fec88c53ada73811bb576de.seg 02691156 +03001627/points/3f8d0d53e2bd74124b3c42e318f3affc.pts 03001627/expert_verified/points_label/3f8d0d53e2bd74124b3c42e318f3affc.seg 03001627 +04379243/points/cd106955d3bdf8e751c4deb11af7079e.pts 04379243/expert_verified/points_label/cd106955d3bdf8e751c4deb11af7079e.seg 04379243 +03001627/points/11506b96d41f7d3dd7c4a943f33e0384.pts 03001627/expert_verified/points_label/11506b96d41f7d3dd7c4a943f33e0384.seg 03001627 +03001627/points/f51ab8433184dfd2c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/f51ab8433184dfd2c8687ff9b0b4e4ac.seg 03001627 +02691156/points/c9a6dcf87d1f15bca8607f540cc62ba.pts 02691156/expert_verified/points_label/c9a6dcf87d1f15bca8607f540cc62ba.seg 02691156 +04379243/points/d9c75799ff9ff74664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/d9c75799ff9ff74664b3b9b23ddfcbc.seg 04379243 +04379243/points/93e81005c19a74b8664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/93e81005c19a74b8664b3b9b23ddfcbc.seg 04379243 +02958343/points/5057c9dbf72e0352728fa2df514c65d4.pts 02958343/expert_verified/points_label/5057c9dbf72e0352728fa2df514c65d4.seg 02958343 +04379243/points/8ad88ee4442fd0fd8a6ba7ebad3985bb.pts 04379243/expert_verified/points_label/8ad88ee4442fd0fd8a6ba7ebad3985bb.seg 04379243 +04379243/points/a2554ec7e2331a8fab610b0c94236463.pts 04379243/expert_verified/points_label/a2554ec7e2331a8fab610b0c94236463.seg 04379243 +04379243/points/482a76d14781e55e25374da32e705c.pts 04379243/expert_verified/points_label/482a76d14781e55e25374da32e705c.seg 04379243 +02691156/points/d06105ee2a2ae27c51008e496c6cfd2e.pts 02691156/expert_verified/points_label/d06105ee2a2ae27c51008e496c6cfd2e.seg 02691156 +04379243/points/45a09b1ce3111e4f22f4fabdf1ee0670.pts 04379243/expert_verified/points_label/45a09b1ce3111e4f22f4fabdf1ee0670.seg 04379243 +03467517/points/9aaad035af7e6ab1ed724609df3eb104.pts 03467517/expert_verified/points_label/9aaad035af7e6ab1ed724609df3eb104.seg 03467517 +02691156/points/cf0cdaa94220ee3f4c3a35cee92bb95b.pts 02691156/expert_verified/points_label/cf0cdaa94220ee3f4c3a35cee92bb95b.seg 02691156 +02691156/points/48cb2de06f46cde25ed29e0a9f14425.pts 02691156/expert_verified/points_label/48cb2de06f46cde25ed29e0a9f14425.seg 02691156 +03001627/points/2f0a94efe6d1da7f8616812464c86290.pts 03001627/expert_verified/points_label/2f0a94efe6d1da7f8616812464c86290.seg 03001627 +02691156/points/e0385af10bddc6a0ca8607f540cc62ba.pts 02691156/expert_verified/points_label/e0385af10bddc6a0ca8607f540cc62ba.seg 02691156 +03467517/points/71139bd2ff6c4257280ec2e5049bb369.pts 03467517/expert_verified/points_label/71139bd2ff6c4257280ec2e5049bb369.seg 03467517 +03001627/points/6251b398004a02fffebad4f49b26ec52.pts 03001627/expert_verified/points_label/6251b398004a02fffebad4f49b26ec52.seg 03001627 +03467517/points/7eba657565cc69e913f86abea5e4b9e0.pts 03467517/expert_verified/points_label/7eba657565cc69e913f86abea5e4b9e0.seg 03467517 
+03001627/points/8d2fd4b9c583e1e6a12cdfe22cdc2f5d.pts 03001627/expert_verified/points_label/8d2fd4b9c583e1e6a12cdfe22cdc2f5d.seg 03001627 +03001627/points/ffa1e25f499e586694e98ee4fdfd7464.pts 03001627/expert_verified/points_label/ffa1e25f499e586694e98ee4fdfd7464.seg 03001627 +03797390/points/9af98540f45411467246665d3d3724c.pts 03797390/expert_verified/points_label/9af98540f45411467246665d3d3724c.seg 03797390 +02691156/points/b9fabfa6d5fedbc3a8e091cb544689d5.pts 02691156/expert_verified/points_label/b9fabfa6d5fedbc3a8e091cb544689d5.seg 02691156 +04379243/points/a2561614d015f2fdfebad4f49b26ec52.pts 04379243/expert_verified/points_label/a2561614d015f2fdfebad4f49b26ec52.seg 04379243 +03642806/points/2134ad3fc25a6284193a4c984002ed32.pts 03642806/expert_verified/points_label/2134ad3fc25a6284193a4c984002ed32.seg 03642806 +03001627/points/d3302b7fa6504cab1a461b43b8f257f.pts 03001627/expert_verified/points_label/d3302b7fa6504cab1a461b43b8f257f.seg 03001627 +03467517/points/bf7026f9814230414269db3f92b7aa5e.pts 03467517/expert_verified/points_label/bf7026f9814230414269db3f92b7aa5e.seg 03467517 +03636649/points/9aff9fdad0e3555c7eecb4e0df212ad9.pts 03636649/expert_verified/points_label/9aff9fdad0e3555c7eecb4e0df212ad9.seg 03636649 +03797390/points/a3cd44bbd3ba5b019a4cbf5d3b79df06.pts 03797390/expert_verified/points_label/a3cd44bbd3ba5b019a4cbf5d3b79df06.seg 03797390 +04099429/points/eff3a27a085e02e5146be45f8a3c1ff8.pts 04099429/expert_verified/points_label/eff3a27a085e02e5146be45f8a3c1ff8.seg 04099429 +02958343/points/1e3f494626a24badf35b4953d8add91f.pts 02958343/expert_verified/points_label/1e3f494626a24badf35b4953d8add91f.seg 02958343 +04379243/points/1f3e217cbc871152d7465eca206fda6f.pts 04379243/expert_verified/points_label/1f3e217cbc871152d7465eca206fda6f.seg 04379243 +03636649/points/cef6757831b4d9738c8f019f17f4687c.pts 03636649/expert_verified/points_label/cef6757831b4d9738c8f019f17f4687c.seg 03636649 +04379243/points/e8689b8b1610bf2841bb8a7ba579a58.pts 04379243/expert_verified/points_label/e8689b8b1610bf2841bb8a7ba579a58.seg 04379243 +03001627/points/40168f46019eb867be7e1d42d63ca9f0.pts 03001627/expert_verified/points_label/40168f46019eb867be7e1d42d63ca9f0.seg 03001627 +03624134/points/7aed22a7074f16431cf05d6e4dbb95af.pts 03624134/expert_verified/points_label/7aed22a7074f16431cf05d6e4dbb95af.seg 03624134 +04379243/points/5d53ed3005f4dc6856786b90799c4fdb.pts 04379243/expert_verified/points_label/5d53ed3005f4dc6856786b90799c4fdb.seg 04379243 +04379243/points/beebc267ea0c16a5c7f6a57f6f73d8a6.pts 04379243/expert_verified/points_label/beebc267ea0c16a5c7f6a57f6f73d8a6.seg 04379243 +04379243/points/943d786e2df9251ec76aead7da70af41.pts 04379243/expert_verified/points_label/943d786e2df9251ec76aead7da70af41.seg 04379243 +04379243/points/90d87b4d9a5a1e78f4b6538438a0b930.pts 04379243/expert_verified/points_label/90d87b4d9a5a1e78f4b6538438a0b930.seg 04379243 +02958343/points/d47353fc60390df85d918097f81825e3.pts 02958343/expert_verified/points_label/d47353fc60390df85d918097f81825e3.seg 02958343 +03624134/points/90021da7c71f6bcbf02ee453ff283e26.pts 03624134/expert_verified/points_label/90021da7c71f6bcbf02ee453ff283e26.seg 03624134 +02958343/points/d1acd4916d3d3b57c48db2ed8f5e994c.pts 02958343/expert_verified/points_label/d1acd4916d3d3b57c48db2ed8f5e994c.seg 02958343 +03001627/points/1d1c829a54f0ae426cdb122727dd360f.pts 03001627/expert_verified/points_label/1d1c829a54f0ae426cdb122727dd360f.seg 03001627 +04379243/points/c35a14f84985f92a9856fa70a578baeb.pts 
04379243/expert_verified/points_label/c35a14f84985f92a9856fa70a578baeb.seg 04379243 +03636649/points/5c5119a226e1ce9934804d261199e1bf.pts 03636649/expert_verified/points_label/5c5119a226e1ce9934804d261199e1bf.seg 03636649 +03636649/points/6bb8020fa82b27dde11a3e838aa2c287.pts 03636649/expert_verified/points_label/6bb8020fa82b27dde11a3e838aa2c287.seg 03636649 +03797390/points/fad118b32085f3f2c2c72e575af174cd.pts 03797390/expert_verified/points_label/fad118b32085f3f2c2c72e575af174cd.seg 03797390 +04379243/points/a82387cf9d9d253aa06f94abffad1304.pts 04379243/expert_verified/points_label/a82387cf9d9d253aa06f94abffad1304.seg 04379243 +03948459/points/a7a340a901d63486260a770f90456bf7.pts 03948459/expert_verified/points_label/a7a340a901d63486260a770f90456bf7.seg 03948459 +03624134/points/60e7b05ddeeb48eb37fa2c3ecb75f337.pts 03624134/expert_verified/points_label/60e7b05ddeeb48eb37fa2c3ecb75f337.seg 03624134 +02958343/points/3e2c3cb4f4c65b9cde9d4070fcdfa604.pts 02958343/expert_verified/points_label/3e2c3cb4f4c65b9cde9d4070fcdfa604.seg 02958343 +03001627/points/d58df0968070bf3b4b3c42e318f3affc.pts 03001627/expert_verified/points_label/d58df0968070bf3b4b3c42e318f3affc.seg 03001627 +04379243/points/4a3641784a9ecca04fa8d6439169bda4.pts 04379243/expert_verified/points_label/4a3641784a9ecca04fa8d6439169bda4.seg 04379243 +04225987/points/d31aaca67fd8ef1827d17dabad15093.pts 04225987/expert_verified/points_label/d31aaca67fd8ef1827d17dabad15093.seg 04225987 +03001627/points/c51937167dd0db45f7628281ecb18112.pts 03001627/expert_verified/points_label/c51937167dd0db45f7628281ecb18112.seg 03001627 +04379243/points/768cb2332a16fd63855931d119219022.pts 04379243/expert_verified/points_label/768cb2332a16fd63855931d119219022.seg 04379243 +03001627/points/8c76176c82e3e42d283b00891f680579.pts 03001627/expert_verified/points_label/8c76176c82e3e42d283b00891f680579.seg 03001627 +03001627/points/d4d9b991ff7d31e8c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/d4d9b991ff7d31e8c8687ff9b0b4e4ac.seg 03001627 +03797390/points/162201dfe14b73f0281365259d1cf342.pts 03797390/expert_verified/points_label/162201dfe14b73f0281365259d1cf342.seg 03797390 +04379243/points/ed1e06e886b5514fe8f49d7c9e73ab9.pts 04379243/expert_verified/points_label/ed1e06e886b5514fe8f49d7c9e73ab9.seg 04379243 +03636649/points/90651b3febfc3afe15226aa76eb7c3e.pts 03636649/expert_verified/points_label/90651b3febfc3afe15226aa76eb7c3e.seg 03636649 +04379243/points/24b208dd138d8af36210db75a4cd581b.pts 04379243/expert_verified/points_label/24b208dd138d8af36210db75a4cd581b.seg 04379243 +03001627/points/439418b35f600f4bb10dc0fca58d0b2c.pts 03001627/expert_verified/points_label/439418b35f600f4bb10dc0fca58d0b2c.seg 03001627 +03636649/points/88257c5a48d94b1e2b151d8b52c53b90.pts 03636649/expert_verified/points_label/88257c5a48d94b1e2b151d8b52c53b90.seg 03636649 +02691156/points/ad546b049b2246bd609e2d916fa0da27.pts 02691156/expert_verified/points_label/ad546b049b2246bd609e2d916fa0da27.seg 02691156 +03001627/points/7efeece3b5cf2853d706779c93538ee1.pts 03001627/expert_verified/points_label/7efeece3b5cf2853d706779c93538ee1.seg 03001627 +04379243/points/30dd74f09af6b1c2fe5c8ffd0f5eba47.pts 04379243/expert_verified/points_label/30dd74f09af6b1c2fe5c8ffd0f5eba47.seg 04379243 +02691156/points/752d9a010346862551cfdb4c9f126c12.pts 02691156/expert_verified/points_label/752d9a010346862551cfdb4c9f126c12.seg 02691156 +03001627/points/d1237422881f4d22ff25b0c2db862d19.pts 03001627/expert_verified/points_label/d1237422881f4d22ff25b0c2db862d19.seg 03001627 
+04379243/points/95af60aa8cb9be066a76e23e6f966dea.pts 04379243/expert_verified/points_label/95af60aa8cb9be066a76e23e6f966dea.seg 04379243 +02691156/points/556d2b99469e62e623a346a784afd6ba.pts 02691156/expert_verified/points_label/556d2b99469e62e623a346a784afd6ba.seg 02691156 +04379243/points/6e23179a3559775a65eacc25f128a1c5.pts 04379243/expert_verified/points_label/6e23179a3559775a65eacc25f128a1c5.seg 04379243 +02691156/points/3b82e575165383903c83f6e156ad107a.pts 02691156/expert_verified/points_label/3b82e575165383903c83f6e156ad107a.seg 02691156 +02773838/points/71ead7f072106c63ed13f430b2941481.pts 02773838/expert_verified/points_label/71ead7f072106c63ed13f430b2941481.seg 02773838 +03001627/points/c9d68e1e5309ac25ac57e7d566628472.pts 03001627/expert_verified/points_label/c9d68e1e5309ac25ac57e7d566628472.seg 03001627 +02691156/points/b3a59a941500e76535592b447835a16e.pts 02691156/expert_verified/points_label/b3a59a941500e76535592b447835a16e.seg 02691156 +03797390/points/4d9764afa3fbeb1b6c69dceb67157a66.pts 03797390/expert_verified/points_label/4d9764afa3fbeb1b6c69dceb67157a66.seg 03797390 +04379243/points/68ea1f319a9d724ec3bd24f986301745.pts 04379243/expert_verified/points_label/68ea1f319a9d724ec3bd24f986301745.seg 04379243 +03001627/points/30363681727c804095937f6e581cbd41.pts 03001627/expert_verified/points_label/30363681727c804095937f6e581cbd41.seg 03001627 +03001627/points/f4f1aba65ebe48eb70930286c914896b.pts 03001627/expert_verified/points_label/f4f1aba65ebe48eb70930286c914896b.seg 03001627 +02691156/points/a3fc9ef9f611a783525e60273896d30a.pts 02691156/expert_verified/points_label/a3fc9ef9f611a783525e60273896d30a.seg 02691156 +03636649/points/b0871c4ac8505d9c3d39d8012919dd25.pts 03636649/expert_verified/points_label/b0871c4ac8505d9c3d39d8012919dd25.seg 03636649 +03001627/points/d7e26a070ee3b35cdf6cfab91d65bb91.pts 03001627/expert_verified/points_label/d7e26a070ee3b35cdf6cfab91d65bb91.seg 03001627 +04379243/points/9012c6ca245c1bf4e6c5cd45aa112726.pts 04379243/expert_verified/points_label/9012c6ca245c1bf4e6c5cd45aa112726.seg 04379243 +03636649/points/3ab9e4300cee0259f72e8839e840c146.pts 03636649/expert_verified/points_label/3ab9e4300cee0259f72e8839e840c146.seg 03636649 +04379243/points/6e0fed54fcae8a62edccc47bf0dcf5d3.pts 04379243/expert_verified/points_label/6e0fed54fcae8a62edccc47bf0dcf5d3.seg 04379243 +04379243/points/aafc579804cc095cbababe11fcea8796.pts 04379243/expert_verified/points_label/aafc579804cc095cbababe11fcea8796.seg 04379243 +03636649/points/9adee08c737c7c134c6deb9ede0648df.pts 03636649/expert_verified/points_label/9adee08c737c7c134c6deb9ede0648df.seg 03636649 +02691156/points/f39985959d394f8c863ab010b80d9ed.pts 02691156/expert_verified/points_label/f39985959d394f8c863ab010b80d9ed.seg 02691156 +04379243/points/23d4170c7a0a2a014b3c42e318f3affc.pts 04379243/expert_verified/points_label/23d4170c7a0a2a014b3c42e318f3affc.seg 04379243 +04379243/points/a1593fbe3a78c7858795000a72749c36.pts 04379243/expert_verified/points_label/a1593fbe3a78c7858795000a72749c36.seg 04379243 +03001627/points/4b2ede169dcc83ce4591019e9d133858.pts 03001627/expert_verified/points_label/4b2ede169dcc83ce4591019e9d133858.seg 03001627 +03001627/points/3fa1eeed2e8e2534febad4f49b26ec52.pts 03001627/expert_verified/points_label/3fa1eeed2e8e2534febad4f49b26ec52.seg 03001627 +04379243/points/e8ba9621aef9395a3019620286259e2c.pts 04379243/expert_verified/points_label/e8ba9621aef9395a3019620286259e2c.seg 04379243 +03001627/points/875925d42780159ffebad4f49b26ec52.pts 
03001627/expert_verified/points_label/875925d42780159ffebad4f49b26ec52.seg 03001627 +03001627/points/548ab6b6e8b2dc505ff61a3a2a0e2484.pts 03001627/expert_verified/points_label/548ab6b6e8b2dc505ff61a3a2a0e2484.seg 03001627 +03467517/points/4f401d78068a9d348ee96618ee16ca27.pts 03467517/expert_verified/points_label/4f401d78068a9d348ee96618ee16ca27.seg 03467517 +04379243/points/f7600660924857c0d31d0d81bfe9c743.pts 04379243/expert_verified/points_label/f7600660924857c0d31d0d81bfe9c743.seg 04379243 +04379243/points/edba7eb533ae3578ece232edf44331c7.pts 04379243/expert_verified/points_label/edba7eb533ae3578ece232edf44331c7.seg 04379243 +03001627/points/8b8fa92f9c677b0713decb1a0563b12.pts 03001627/expert_verified/points_label/8b8fa92f9c677b0713decb1a0563b12.seg 03001627 +02691156/points/81e6b629264dad5daf2c6c19cc41708a.pts 02691156/expert_verified/points_label/81e6b629264dad5daf2c6c19cc41708a.seg 02691156 +02691156/points/a0a7e673a1e1bca78699933784576e73.pts 02691156/expert_verified/points_label/a0a7e673a1e1bca78699933784576e73.seg 02691156 +03636649/points/f01358d4f45cae23ce670f026edf07e5.pts 03636649/expert_verified/points_label/f01358d4f45cae23ce670f026edf07e5.seg 03636649 +03001627/points/808fa82fe9ad86d9f1cc184b6fa3e1f9.pts 03001627/expert_verified/points_label/808fa82fe9ad86d9f1cc184b6fa3e1f9.seg 03001627 +02691156/points/57937c7ab42260ebf119374ee5d5f944.pts 02691156/expert_verified/points_label/57937c7ab42260ebf119374ee5d5f944.seg 02691156 +03001627/points/fbddac94cfa74a7b5c0228148b88226c.pts 03001627/expert_verified/points_label/fbddac94cfa74a7b5c0228148b88226c.seg 03001627 +04379243/points/ad92bfc65465091c48d90eef8384210.pts 04379243/expert_verified/points_label/ad92bfc65465091c48d90eef8384210.seg 04379243 +03467517/points/6ce23c82af30b629e8f705eb96ba3376.pts 03467517/expert_verified/points_label/6ce23c82af30b629e8f705eb96ba3376.seg 03467517 +03001627/points/bd1787066323c7a64424fc4d3c9cb157.pts 03001627/expert_verified/points_label/bd1787066323c7a64424fc4d3c9cb157.seg 03001627 +03001627/points/uca24feec-f0c0-454c-baaf-561530686f40.pts 03001627/expert_verified/points_label/uca24feec-f0c0-454c-baaf-561530686f40.seg 03001627 +03001627/points/226704c72560008421ceb39dc3069834.pts 03001627/expert_verified/points_label/226704c72560008421ceb39dc3069834.seg 03001627 +02691156/points/2c49289098e4492bca8607f540cc62ba.pts 02691156/expert_verified/points_label/2c49289098e4492bca8607f540cc62ba.seg 02691156 +03001627/points/cff9a523a9e20eaeb40f0ac0fb9a650d.pts 03001627/expert_verified/points_label/cff9a523a9e20eaeb40f0ac0fb9a650d.seg 03001627 +04379243/points/38e90183c838f443b43753a53e4593db.pts 04379243/expert_verified/points_label/38e90183c838f443b43753a53e4593db.seg 04379243 +04379243/points/8b4ec70a3c1283b1fb5f8baea920e189.pts 04379243/expert_verified/points_label/8b4ec70a3c1283b1fb5f8baea920e189.seg 04379243 +04379243/points/59a1703cb9320c018f49a52c8d710d0f.pts 04379243/expert_verified/points_label/59a1703cb9320c018f49a52c8d710d0f.seg 04379243 +03636649/points/4ba237c2c40313f373b3ec02b97cb0f.pts 03636649/expert_verified/points_label/4ba237c2c40313f373b3ec02b97cb0f.seg 03636649 +04379243/points/bb027ed892722b1f3399de188dc5ee56.pts 04379243/expert_verified/points_label/bb027ed892722b1f3399de188dc5ee56.seg 04379243 +03467517/points/8b1d0f73e54ef59c93f0194265a9746c.pts 03467517/expert_verified/points_label/8b1d0f73e54ef59c93f0194265a9746c.seg 03467517 +03467517/points/1300e8bafb819c8e1887f40a4f62df44.pts 03467517/expert_verified/points_label/1300e8bafb819c8e1887f40a4f62df44.seg 03467517 
+03642806/points/9fa387d7f442b96e75e60c00fabe2744.pts 03642806/expert_verified/points_label/9fa387d7f442b96e75e60c00fabe2744.seg 03642806 +04379243/points/e153f757330a4ea3cdd1f51ef2b8f2ed.pts 04379243/expert_verified/points_label/e153f757330a4ea3cdd1f51ef2b8f2ed.seg 04379243 +03636649/points/d00157a022079bdef3655a2ce983ab1f.pts 03636649/expert_verified/points_label/d00157a022079bdef3655a2ce983ab1f.seg 03636649 +04379243/points/9eeea5f7b030ff6ac155f88004a92bc8.pts 04379243/expert_verified/points_label/9eeea5f7b030ff6ac155f88004a92bc8.seg 04379243 +04379243/points/10ed64b4c7eb6d9311ee7ca4f000feba.pts 04379243/expert_verified/points_label/10ed64b4c7eb6d9311ee7ca4f000feba.seg 04379243 +03001627/points/6db2255a51caf84e823e7e244bf84209.pts 03001627/expert_verified/points_label/6db2255a51caf84e823e7e244bf84209.seg 03001627 +03001627/points/8ddaa112e6ba36b5b1e23c7675c49239.pts 03001627/expert_verified/points_label/8ddaa112e6ba36b5b1e23c7675c49239.seg 03001627 +04379243/points/7813f4e4c0a58118cbb8bac2032149c.pts 04379243/expert_verified/points_label/7813f4e4c0a58118cbb8bac2032149c.seg 04379243 +03797390/points/336122c3105440d193e42e2720468bf0.pts 03797390/expert_verified/points_label/336122c3105440d193e42e2720468bf0.seg 03797390 +03001627/points/f2e2993abf4c952b2e69a7e134f91051.pts 03001627/expert_verified/points_label/f2e2993abf4c952b2e69a7e134f91051.seg 03001627 +04379243/points/627248fa64c1db5fab610b0c94236463.pts 04379243/expert_verified/points_label/627248fa64c1db5fab610b0c94236463.seg 04379243 +04379243/points/3b465822b34ed20ca05d3424fd8d541a.pts 04379243/expert_verified/points_label/3b465822b34ed20ca05d3424fd8d541a.seg 04379243 +03467517/points/a7ddf2e5b9dc278293f0194265a9746c.pts 03467517/expert_verified/points_label/a7ddf2e5b9dc278293f0194265a9746c.seg 03467517 +03636649/points/b36bfbbc98cb45431735ea0e092a805a.pts 03636649/expert_verified/points_label/b36bfbbc98cb45431735ea0e092a805a.seg 03636649 +04379243/points/7d14ae7d0b7338bda0ab1d82ef09f78f.pts 04379243/expert_verified/points_label/7d14ae7d0b7338bda0ab1d82ef09f78f.seg 04379243 +03467517/points/f7645b3c690d954682c2412261cb8600.pts 03467517/expert_verified/points_label/f7645b3c690d954682c2412261cb8600.seg 03467517 +02958343/points/41a6deadd39b4c754d0f9a1ef5f184fe.pts 02958343/expert_verified/points_label/41a6deadd39b4c754d0f9a1ef5f184fe.seg 02958343 +02691156/points/f74cbd91e6fb40dfce5965228d7e8c9f.pts 02691156/expert_verified/points_label/f74cbd91e6fb40dfce5965228d7e8c9f.seg 02691156 +04379243/points/6c4c3bfe275e66b1b75e606711562bfc.pts 04379243/expert_verified/points_label/6c4c3bfe275e66b1b75e606711562bfc.seg 04379243 +04379243/points/7d358a01c9467815a9505c473725122e.pts 04379243/expert_verified/points_label/7d358a01c9467815a9505c473725122e.seg 04379243 +04379243/points/5fe3476df92392e1397aad305ec14786.pts 04379243/expert_verified/points_label/5fe3476df92392e1397aad305ec14786.seg 04379243 +03001627/points/34d3960d35d8d5219b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/34d3960d35d8d5219b9f2eb77f5e247e.seg 03001627 +03001627/points/1b67a3a1101a9acb905477d2a8504646.pts 03001627/expert_verified/points_label/1b67a3a1101a9acb905477d2a8504646.seg 03001627 +03001627/points/ee4858f78dc33591100e9bd5c4b0af54.pts 03001627/expert_verified/points_label/ee4858f78dc33591100e9bd5c4b0af54.seg 03001627 +03001627/points/a578b0027e7d9ec7b2ca3ea77e53abe.pts 03001627/expert_verified/points_label/a578b0027e7d9ec7b2ca3ea77e53abe.seg 03001627 +02691156/points/916950e40ca7aabc8b96ae1a0a8b84ec.pts 
02691156/expert_verified/points_label/916950e40ca7aabc8b96ae1a0a8b84ec.seg 02691156 +04379243/points/1abfb0c03c81fc2219fb4103277a6b93.pts 04379243/expert_verified/points_label/1abfb0c03c81fc2219fb4103277a6b93.seg 04379243 +02691156/points/a702da03d770f5096e2738fc9da60e6f.pts 02691156/expert_verified/points_label/a702da03d770f5096e2738fc9da60e6f.seg 02691156 +04379243/points/2e2894138df855b26f88aa1b7f7cc6c6.pts 04379243/expert_verified/points_label/2e2894138df855b26f88aa1b7f7cc6c6.seg 04379243 +03001627/points/589cd6a1f4367fd834b707eb14b2a4a5.pts 03001627/expert_verified/points_label/589cd6a1f4367fd834b707eb14b2a4a5.seg 03001627 +03636649/points/f8534299ecce5c16eaf14273fa406ffc.pts 03636649/expert_verified/points_label/f8534299ecce5c16eaf14273fa406ffc.seg 03636649 +04379243/points/ea96b8a866121d1abed1bd9593e318c.pts 04379243/expert_verified/points_label/ea96b8a866121d1abed1bd9593e318c.seg 04379243 +03624134/points/9746101f20473d346bbd83c2bc4c3b2e.pts 03624134/expert_verified/points_label/9746101f20473d346bbd83c2bc4c3b2e.seg 03624134 +02958343/points/9c4a3879c71df693af0f25977186b501.pts 02958343/expert_verified/points_label/9c4a3879c71df693af0f25977186b501.seg 02958343 +03001627/points/6621723f7af35f2dcd344c2b2cefcda6.pts 03001627/expert_verified/points_label/6621723f7af35f2dcd344c2b2cefcda6.seg 03001627 +03948459/points/8c9e592c95f95e7c9a6e43b878d5b335.pts 03948459/expert_verified/points_label/8c9e592c95f95e7c9a6e43b878d5b335.seg 03948459 +04379243/points/36a6d851dbe02410ad16260d4d73b56.pts 04379243/expert_verified/points_label/36a6d851dbe02410ad16260d4d73b56.seg 04379243 +04379243/points/b1ca280d9567270ade98d10ab5975b59.pts 04379243/expert_verified/points_label/b1ca280d9567270ade98d10ab5975b59.seg 04379243 +03467517/points/5ed99a0b793e1f5ee52744498b9b3051.pts 03467517/expert_verified/points_label/5ed99a0b793e1f5ee52744498b9b3051.seg 03467517 +03001627/points/18fd8342fa5d1d4f5268b70948af88b2.pts 03001627/expert_verified/points_label/18fd8342fa5d1d4f5268b70948af88b2.seg 03001627 +02691156/points/cc60baa1a796f5c14c3a35cee92bb95b.pts 02691156/expert_verified/points_label/cc60baa1a796f5c14c3a35cee92bb95b.seg 02691156 +03642806/points/3237f5cd4bca555955357c338ec9641.pts 03642806/expert_verified/points_label/3237f5cd4bca555955357c338ec9641.seg 03642806 +03001627/points/fee248777c9c4807f8bc1f8036e08e44.pts 03001627/expert_verified/points_label/fee248777c9c4807f8bc1f8036e08e44.seg 03001627 +04379243/points/2d90a1998eca8778dcfcef693e7ec696.pts 04379243/expert_verified/points_label/2d90a1998eca8778dcfcef693e7ec696.seg 04379243 +02958343/points/3ef7cfbc172840b2393bf61b30c528bb.pts 02958343/expert_verified/points_label/3ef7cfbc172840b2393bf61b30c528bb.seg 02958343 +02691156/points/240fd3c1fd804ec1b8cf782e8c539948.pts 02691156/expert_verified/points_label/240fd3c1fd804ec1b8cf782e8c539948.seg 02691156 +04379243/points/60c931dcc6d0982944bda2555d37e46.pts 04379243/expert_verified/points_label/60c931dcc6d0982944bda2555d37e46.seg 04379243 +04379243/points/93040a14fad5588ed889130b88839a0c.pts 04379243/expert_verified/points_label/93040a14fad5588ed889130b88839a0c.seg 04379243 +02958343/points/a75ff576da012340468bac13e007a6e9.pts 02958343/expert_verified/points_label/a75ff576da012340468bac13e007a6e9.seg 02958343 +03467517/points/57286d92604c9ebea3d3eb77b119df6d.pts 03467517/expert_verified/points_label/57286d92604c9ebea3d3eb77b119df6d.seg 03467517 +03636649/points/913ba6b6ac6aea3356c82fefb25b338b.pts 03636649/expert_verified/points_label/913ba6b6ac6aea3356c82fefb25b338b.seg 03636649 
+03001627/points/cce9ffdcc7ca8ddea300840c9d7bfa74.pts 03001627/expert_verified/points_label/cce9ffdcc7ca8ddea300840c9d7bfa74.seg 03001627 +04379243/points/913c0ff011ad0658dcfcef693e7ec696.pts 04379243/expert_verified/points_label/913c0ff011ad0658dcfcef693e7ec696.seg 04379243 +03001627/points/9d0b25421c13008e35836c728d324152.pts 03001627/expert_verified/points_label/9d0b25421c13008e35836c728d324152.seg 03001627 +03797390/points/a8f7a0edd3edc3299e54b4084dc33544.pts 03797390/expert_verified/points_label/a8f7a0edd3edc3299e54b4084dc33544.seg 03797390 +04379243/points/5b9a7b7952996844d802aa676be38da2.pts 04379243/expert_verified/points_label/5b9a7b7952996844d802aa676be38da2.seg 04379243 +02954340/points/4bd0b6df02772d8f59c9250a427b57f.pts 02954340/expert_verified/points_label/4bd0b6df02772d8f59c9250a427b57f.seg 02954340 +02958343/points/a72134cd499fd1c4f79e091fa09130a.pts 02958343/expert_verified/points_label/a72134cd499fd1c4f79e091fa09130a.seg 02958343 +04379243/points/cc6fbdc6f2aa5ea3d889130b88839a0c.pts 04379243/expert_verified/points_label/cc6fbdc6f2aa5ea3d889130b88839a0c.seg 04379243 +03624134/points/85ced924eedc6ff566b5b592ed1ddee0.pts 03624134/expert_verified/points_label/85ced924eedc6ff566b5b592ed1ddee0.seg 03624134 +03001627/points/60622d74c0712934a5817f81a1efa3cc.pts 03001627/expert_verified/points_label/60622d74c0712934a5817f81a1efa3cc.seg 03001627 +04379243/points/2633f011b236a8979070b65ce7b4b532.pts 04379243/expert_verified/points_label/2633f011b236a8979070b65ce7b4b532.seg 04379243 +03001627/points/9d9d69e5f2bc80a867903707764646db.pts 03001627/expert_verified/points_label/9d9d69e5f2bc80a867903707764646db.seg 03001627 +03001627/points/ce463d63d8771c5ccf19858fd1963d10.pts 03001627/expert_verified/points_label/ce463d63d8771c5ccf19858fd1963d10.seg 03001627 +04379243/points/ad17445446e4fd3adcfcef693e7ec696.pts 04379243/expert_verified/points_label/ad17445446e4fd3adcfcef693e7ec696.seg 04379243 +03001627/points/71372c1f20b6a04c43c40c5aa3d5c5b7.pts 03001627/expert_verified/points_label/71372c1f20b6a04c43c40c5aa3d5c5b7.seg 03001627 +02691156/points/9436273fc1a5e3ca7af159eaf7625abf.pts 02691156/expert_verified/points_label/9436273fc1a5e3ca7af159eaf7625abf.seg 02691156 +03797390/points/b98fa11a567f644344b25d683fe71de.pts 03797390/expert_verified/points_label/b98fa11a567f644344b25d683fe71de.seg 03797390 +02691156/points/53eee66291c47a91bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/53eee66291c47a91bc0909d98a1ff2b4.seg 02691156 +03642806/points/e55ececde88255b93e73f3893a7337bb.pts 03642806/expert_verified/points_label/e55ececde88255b93e73f3893a7337bb.seg 03642806 +02958343/points/1079efee042629d4ce28f0f1b509eda.pts 02958343/expert_verified/points_label/1079efee042629d4ce28f0f1b509eda.seg 02958343 +03001627/points/c826c65111c867ab45a1df43bcd9e471.pts 03001627/expert_verified/points_label/c826c65111c867ab45a1df43bcd9e471.seg 03001627 +02958343/points/39201299cf83ec2577763486d77d1cb.pts 02958343/expert_verified/points_label/39201299cf83ec2577763486d77d1cb.seg 02958343 +04379243/points/e8c01f71fd941af11190e285a2cbc9c.pts 04379243/expert_verified/points_label/e8c01f71fd941af11190e285a2cbc9c.seg 04379243 +03001627/points/948f1555282e27da190c615a2115d2f7.pts 03001627/expert_verified/points_label/948f1555282e27da190c615a2115d2f7.seg 03001627 +02691156/points/ca4ec545363b3b8e8c2814a4ead9cb90.pts 02691156/expert_verified/points_label/ca4ec545363b3b8e8c2814a4ead9cb90.seg 02691156 +03001627/points/b8f4ce34b44620cc9b9f2eb77f5e247e.pts 
03001627/expert_verified/points_label/b8f4ce34b44620cc9b9f2eb77f5e247e.seg 03001627 +02958343/points/188621bbfc7d9477ce27281f3b76d1f5.pts 02958343/expert_verified/points_label/188621bbfc7d9477ce27281f3b76d1f5.seg 02958343 +04379243/points/9a71b92445cd3f023a9bc242c86fb7a0.pts 04379243/expert_verified/points_label/9a71b92445cd3f023a9bc242c86fb7a0.seg 04379243 +03001627/points/4372b33dfc84c2f56a9ab6fc87e1604e.pts 03001627/expert_verified/points_label/4372b33dfc84c2f56a9ab6fc87e1604e.seg 03001627 +03001627/points/b16f1858c1a7c0a65001cb19c4a0eee4.pts 03001627/expert_verified/points_label/b16f1858c1a7c0a65001cb19c4a0eee4.seg 03001627 +03467517/points/5238adec0790595930c206f77b5cb4d0.pts 03467517/expert_verified/points_label/5238adec0790595930c206f77b5cb4d0.seg 03467517 +02958343/points/3ec7f0347638f7a891eea2fc80d4a25f.pts 02958343/expert_verified/points_label/3ec7f0347638f7a891eea2fc80d4a25f.seg 02958343 +02691156/points/32e7224d196e5866bd564bd76cf3cbec.pts 02691156/expert_verified/points_label/32e7224d196e5866bd564bd76cf3cbec.seg 02691156 +04379243/points/f9beeefdebf70350f4b6538438a0b930.pts 04379243/expert_verified/points_label/f9beeefdebf70350f4b6538438a0b930.seg 04379243 +04379243/points/acbc99e153b9d4d419fb4103277a6b93.pts 04379243/expert_verified/points_label/acbc99e153b9d4d419fb4103277a6b93.seg 04379243 +03467517/points/8ebc3d48afeceec752561cc0fb924c36.pts 03467517/expert_verified/points_label/8ebc3d48afeceec752561cc0fb924c36.seg 03467517 +04379243/points/966cef675324e416cd415550f639925.pts 04379243/expert_verified/points_label/966cef675324e416cd415550f639925.seg 04379243 +03636649/points/85f71a4724fa37c33d39d8012919dd25.pts 03636649/expert_verified/points_label/85f71a4724fa37c33d39d8012919dd25.seg 03636649 +03636649/points/370623095c9773e42ce7d46577f8a9bd.pts 03636649/expert_verified/points_label/370623095c9773e42ce7d46577f8a9bd.seg 03636649 +03624134/points/bbe934c9cdca9c1839ec49305bb07d3d.pts 03624134/expert_verified/points_label/bbe934c9cdca9c1839ec49305bb07d3d.seg 03624134 +02958343/points/d22a2d20acbdca70c972ff3f74d38438.pts 02958343/expert_verified/points_label/d22a2d20acbdca70c972ff3f74d38438.seg 02958343 +02958343/points/ff3c8e21a48ed17cc1bcae9def1986da.pts 02958343/expert_verified/points_label/ff3c8e21a48ed17cc1bcae9def1986da.seg 02958343 +03001627/points/fd5ca05b59b30241d838ae16242881dc.pts 03001627/expert_verified/points_label/fd5ca05b59b30241d838ae16242881dc.seg 03001627 +02691156/points/e3aff5ae3e8f2a7c4c2c88971423d0be.pts 02691156/expert_verified/points_label/e3aff5ae3e8f2a7c4c2c88971423d0be.seg 02691156 +02691156/points/b4575e5e6161fd497b164268a44f7712.pts 02691156/expert_verified/points_label/b4575e5e6161fd497b164268a44f7712.seg 02691156 +03467517/points/153e7883f6cf0e66d57700c05b1862d8.pts 03467517/expert_verified/points_label/153e7883f6cf0e66d57700c05b1862d8.seg 03467517 +03642806/points/4fc3d56243d2d8801ef1ccfaf50f2048.pts 03642806/expert_verified/points_label/4fc3d56243d2d8801ef1ccfaf50f2048.seg 03642806 +04379243/points/ec9861c234daf6bc915f51b5f5e95ffa.pts 04379243/expert_verified/points_label/ec9861c234daf6bc915f51b5f5e95ffa.seg 04379243 +03001627/points/7114ef00fe68d053cccbd142483bf2e7.pts 03001627/expert_verified/points_label/7114ef00fe68d053cccbd142483bf2e7.seg 03001627 +02691156/points/e812f54386acd072d44f37c9e0fb10d0.pts 02691156/expert_verified/points_label/e812f54386acd072d44f37c9e0fb10d0.seg 02691156 +03001627/points/5490efbdadce792f524f4eb395a8604.pts 03001627/expert_verified/points_label/5490efbdadce792f524f4eb395a8604.seg 03001627 
+03948459/points/42740af029297f1d9874fa4c7b1a4298.pts 03948459/expert_verified/points_label/42740af029297f1d9874fa4c7b1a4298.seg 03948459 +03001627/points/d1ec6e9b8063b7efd7f7a4c4609b0913.pts 03001627/expert_verified/points_label/d1ec6e9b8063b7efd7f7a4c4609b0913.seg 03001627 +04379243/points/4b11be42b0c0482dd94faaee2b20e2bf.pts 04379243/expert_verified/points_label/4b11be42b0c0482dd94faaee2b20e2bf.seg 04379243 +03001627/points/d29971cef754cc91cd8c5d1ba690a2c3.pts 03001627/expert_verified/points_label/d29971cef754cc91cd8c5d1ba690a2c3.seg 03001627 +04379243/points/8cc8485f249a37f595b25bd3accf45b5.pts 04379243/expert_verified/points_label/8cc8485f249a37f595b25bd3accf45b5.seg 04379243 +04379243/points/bb5dbf708d5eb7f82099f9e22ca45b04.pts 04379243/expert_verified/points_label/bb5dbf708d5eb7f82099f9e22ca45b04.seg 04379243 +03001627/points/c1b64fef5f3efa0a129905ebfd12d5cd.pts 03001627/expert_verified/points_label/c1b64fef5f3efa0a129905ebfd12d5cd.seg 03001627 +04379243/points/e58e958428584b2b79972b30518c97e2.pts 04379243/expert_verified/points_label/e58e958428584b2b79972b30518c97e2.seg 04379243 +03790512/points/90a521e0def2631fd5dde04c96fd8146.pts 03790512/expert_verified/points_label/90a521e0def2631fd5dde04c96fd8146.seg 03790512 +03467517/points/fcab134da044e5fc77f469126771fc30.pts 03467517/expert_verified/points_label/fcab134da044e5fc77f469126771fc30.seg 03467517 +03001627/points/1d6faeb6d77d1f2cf95cd8df6bebbc3a.pts 03001627/expert_verified/points_label/1d6faeb6d77d1f2cf95cd8df6bebbc3a.seg 03001627 +04379243/points/e993ddaf6d03003071a782a4379556c7.pts 04379243/expert_verified/points_label/e993ddaf6d03003071a782a4379556c7.seg 04379243 +03001627/points/702cebffa33a19f019f079d1b712f46f.pts 03001627/expert_verified/points_label/702cebffa33a19f019f079d1b712f46f.seg 03001627 +03790512/points/7b4eb8cbc470d0d6d5dde04c96fd8146.pts 03790512/expert_verified/points_label/7b4eb8cbc470d0d6d5dde04c96fd8146.seg 03790512 +03001627/points/9515e377c1ec86529b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/9515e377c1ec86529b9f2eb77f5e247e.seg 03001627 +03001627/points/9c3d7b65c739a618285330f26226f8fb.pts 03001627/expert_verified/points_label/9c3d7b65c739a618285330f26226f8fb.seg 03001627 +03790512/points/8ed4bdaf0c8b88ea8b31e74d456742c7.pts 03790512/expert_verified/points_label/8ed4bdaf0c8b88ea8b31e74d456742c7.seg 03790512 +02958343/points/6ed2957beeb7940a9fbaa69916aaebda.pts 02958343/expert_verified/points_label/6ed2957beeb7940a9fbaa69916aaebda.seg 02958343 +03001627/points/37e2b82d5e9dde21cbde89e0c48a01bf.pts 03001627/expert_verified/points_label/37e2b82d5e9dde21cbde89e0c48a01bf.seg 03001627 +04379243/points/1b6bd64fda74bdc4d6983f351200ac6a.pts 04379243/expert_verified/points_label/1b6bd64fda74bdc4d6983f351200ac6a.seg 04379243 +04379243/points/531381f5bbc69e485769b3af36a2ff9f.pts 04379243/expert_verified/points_label/531381f5bbc69e485769b3af36a2ff9f.seg 04379243 +03790512/points/992fbae5178edcbc4e31d0cb4d7568.pts 03790512/expert_verified/points_label/992fbae5178edcbc4e31d0cb4d7568.seg 03790512 +04379243/points/65e7fd8d158658106a76e23e6f966dea.pts 04379243/expert_verified/points_label/65e7fd8d158658106a76e23e6f966dea.seg 04379243 +02691156/points/2229bc4e646f506679f56e78e8640bfb.pts 02691156/expert_verified/points_label/2229bc4e646f506679f56e78e8640bfb.seg 02691156 +02954340/points/f40b47fcbf83b962f0d11ae402ef940e.pts 02954340/expert_verified/points_label/f40b47fcbf83b962f0d11ae402ef940e.seg 02954340 +02773838/points/cbc2328cadf8dc573394926146371698.pts 
02773838/expert_verified/points_label/cbc2328cadf8dc573394926146371698.seg 02773838 +02958343/points/3c6d7c6ce950917b3a93df79ef2b80ef.pts 02958343/expert_verified/points_label/3c6d7c6ce950917b3a93df79ef2b80ef.seg 02958343 +02958343/points/2ccaaa66525d7f095473e57e894e0ef5.pts 02958343/expert_verified/points_label/2ccaaa66525d7f095473e57e894e0ef5.seg 02958343 +02691156/points/70d9304de59792a9515d73fcb34092fc.pts 02691156/expert_verified/points_label/70d9304de59792a9515d73fcb34092fc.seg 02691156 +03001627/points/2ed8d45343a442097869557127addfc0.pts 03001627/expert_verified/points_label/2ed8d45343a442097869557127addfc0.seg 03001627 +04379243/points/84f5e52756fc84f86df14337f24e49f4.pts 04379243/expert_verified/points_label/84f5e52756fc84f86df14337f24e49f4.seg 04379243 +03001627/points/b33a3b1627ad61eb8ca4809dcf42fe1.pts 03001627/expert_verified/points_label/b33a3b1627ad61eb8ca4809dcf42fe1.seg 03001627 +04379243/points/369c19c0971221f3664b3b9b23ddfcbc.pts 04379243/expert_verified/points_label/369c19c0971221f3664b3b9b23ddfcbc.seg 04379243 +03642806/points/5a13f7551c20eb29f3ebfe51dc60263e.pts 03642806/expert_verified/points_label/5a13f7551c20eb29f3ebfe51dc60263e.seg 03642806 +04379243/points/1b01ef65920c342323bdffac38e6b250.pts 04379243/expert_verified/points_label/1b01ef65920c342323bdffac38e6b250.seg 04379243 +02691156/points/9b687f9cff46d43d89c2da356f872ebc.pts 02691156/expert_verified/points_label/9b687f9cff46d43d89c2da356f872ebc.seg 02691156 +04379243/points/746ceaf694d85eb5d5192f88466da1dc.pts 04379243/expert_verified/points_label/746ceaf694d85eb5d5192f88466da1dc.seg 04379243 +04379243/points/9f4eb0d734a2b7a4ab610b0c94236463.pts 04379243/expert_verified/points_label/9f4eb0d734a2b7a4ab610b0c94236463.seg 04379243 +03001627/points/a1213da0e7efffcafebad4f49b26ec52.pts 03001627/expert_verified/points_label/a1213da0e7efffcafebad4f49b26ec52.seg 03001627 +02958343/points/71b00ea32b1810ac373af83f3f2fe606.pts 02958343/expert_verified/points_label/71b00ea32b1810ac373af83f3f2fe606.seg 02958343 +02691156/points/52a84fea7c314f4c3dfc741b4df74043.pts 02691156/expert_verified/points_label/52a84fea7c314f4c3dfc741b4df74043.seg 02691156 +02958343/points/9f3c463272d13d39eb7780cdb3ece367.pts 02958343/expert_verified/points_label/9f3c463272d13d39eb7780cdb3ece367.seg 02958343 +03001627/points/def03f645b3fbd665bb93149cc0adf0.pts 03001627/expert_verified/points_label/def03f645b3fbd665bb93149cc0adf0.seg 03001627 +03001627/points/f9e386d968653602d68fb8f5d99affa0.pts 03001627/expert_verified/points_label/f9e386d968653602d68fb8f5d99affa0.seg 03001627 +03467517/points/9c399ebc617349dcd016bd20f13ab302.pts 03467517/expert_verified/points_label/9c399ebc617349dcd016bd20f13ab302.seg 03467517 +04379243/points/aaaba1bbe037d3b1e406974af41e8842.pts 04379243/expert_verified/points_label/aaaba1bbe037d3b1e406974af41e8842.seg 04379243 +03001627/points/4030ea84b560b857febad4f49b26ec52.pts 03001627/expert_verified/points_label/4030ea84b560b857febad4f49b26ec52.seg 03001627 +04379243/points/a38405108fb416d8356ca1f9220b9968.pts 04379243/expert_verified/points_label/a38405108fb416d8356ca1f9220b9968.seg 04379243 +04379243/points/f864677894410315ab610b0c94236463.pts 04379243/expert_verified/points_label/f864677894410315ab610b0c94236463.seg 04379243 +02954340/points/da5e5ec4c486d6c03baa6271927f050e.pts 02954340/expert_verified/points_label/da5e5ec4c486d6c03baa6271927f050e.seg 02954340 +02691156/points/eed299b690be51ffbd931fcaa69140.pts 02691156/expert_verified/points_label/eed299b690be51ffbd931fcaa69140.seg 02691156 
+03797390/points/b4ae56d6638d5338de671f28c83d2dcb.pts 03797390/expert_verified/points_label/b4ae56d6638d5338de671f28c83d2dcb.seg 03797390 +04379243/points/10cc8c941fc8aeaa71a782a4379556c7.pts 04379243/expert_verified/points_label/10cc8c941fc8aeaa71a782a4379556c7.seg 04379243 +03636649/points/61b57e8b5da8fb13d527a9a6f5a872b9.pts 03636649/expert_verified/points_label/61b57e8b5da8fb13d527a9a6f5a872b9.seg 03636649 +02691156/points/ae4a9574248395b671d03b466c72ce41.pts 02691156/expert_verified/points_label/ae4a9574248395b671d03b466c72ce41.seg 02691156 +04379243/points/8cfe3ff92244310534506cc3910614fe.pts 04379243/expert_verified/points_label/8cfe3ff92244310534506cc3910614fe.seg 04379243 +03001627/points/597cb92a5bfb580eed98cca8f0ccd5f7.pts 03001627/expert_verified/points_label/597cb92a5bfb580eed98cca8f0ccd5f7.seg 03001627 +03001627/points/4231883e92a3c1a21c62d11641ffbd35.pts 03001627/expert_verified/points_label/4231883e92a3c1a21c62d11641ffbd35.seg 03001627 +03636649/points/28793511c46b4fa030f6e0ede20c4525.pts 03636649/expert_verified/points_label/28793511c46b4fa030f6e0ede20c4525.seg 03636649 +02958343/points/4c60f32b6efdc7217dfb1ee6a4b12bf8.pts 02958343/expert_verified/points_label/4c60f32b6efdc7217dfb1ee6a4b12bf8.seg 02958343 +04379243/points/397c56f15e547fad1bb088904f7cb154.pts 04379243/expert_verified/points_label/397c56f15e547fad1bb088904f7cb154.seg 04379243 +04379243/points/9bb816d6a3517a5ca74c2333655a11dd.pts 04379243/expert_verified/points_label/9bb816d6a3517a5ca74c2333655a11dd.seg 04379243 +03790512/points/bae59e64a50d3aa2f68f798d07e007b6.pts 03790512/expert_verified/points_label/bae59e64a50d3aa2f68f798d07e007b6.seg 03790512 +04379243/points/8b094873d775f6e21130871dbfe24c18.pts 04379243/expert_verified/points_label/8b094873d775f6e21130871dbfe24c18.seg 04379243 +04379243/points/4d2f7c689e77df6b6dc1766995c17a41.pts 04379243/expert_verified/points_label/4d2f7c689e77df6b6dc1766995c17a41.seg 04379243 +03467517/points/16916a50a064304bf6ed0b697979412e.pts 03467517/expert_verified/points_label/16916a50a064304bf6ed0b697979412e.seg 03467517 +03636649/points/c802fa4c82498450af6016f34c89d087.pts 03636649/expert_verified/points_label/c802fa4c82498450af6016f34c89d087.seg 03636649 +03001627/points/1ec5a88141aefca9cf6e4dd7ee69d71f.pts 03001627/expert_verified/points_label/1ec5a88141aefca9cf6e4dd7ee69d71f.seg 03001627 +04379243/points/bdefbb1f281434e39961e1085a81acc5.pts 04379243/expert_verified/points_label/bdefbb1f281434e39961e1085a81acc5.seg 04379243 +04379243/points/acf57dbafe8966f577fb15a8d7923976.pts 04379243/expert_verified/points_label/acf57dbafe8966f577fb15a8d7923976.seg 04379243 +03642806/points/cc67f6608c41743ec1830f8ca7a3cbed.pts 03642806/expert_verified/points_label/cc67f6608c41743ec1830f8ca7a3cbed.seg 03642806 +03001627/points/95e1571acdd75922afdb9a672b7d3b8a.pts 03001627/expert_verified/points_label/95e1571acdd75922afdb9a672b7d3b8a.seg 03001627 +04379243/points/2ebe5dfb7bd9a50c6effbd64ad6b71b8.pts 04379243/expert_verified/points_label/2ebe5dfb7bd9a50c6effbd64ad6b71b8.seg 04379243 +03001627/points/a6420c4ed13cf628945a77b945b7b70f.pts 03001627/expert_verified/points_label/a6420c4ed13cf628945a77b945b7b70f.seg 03001627 +04379243/points/1de679dd26d8c69cae44c65a6d0f0732.pts 04379243/expert_verified/points_label/1de679dd26d8c69cae44c65a6d0f0732.seg 04379243 +03001627/points/271012d5de261d08101accd22c701b9.pts 03001627/expert_verified/points_label/271012d5de261d08101accd22c701b9.seg 03001627 +04379243/points/5e409a2627f7cd7d63ecd64ef0e6814c.pts 
04379243/expert_verified/points_label/5e409a2627f7cd7d63ecd64ef0e6814c.seg 04379243 +02691156/points/c9aeb20d7cd1b3b45e9e2656aff7dd5b.pts 02691156/expert_verified/points_label/c9aeb20d7cd1b3b45e9e2656aff7dd5b.seg 02691156 +04379243/points/45b23ac79688170893ba1eeaf62819a2.pts 04379243/expert_verified/points_label/45b23ac79688170893ba1eeaf62819a2.seg 04379243 +02691156/points/9ac292686a2fcebbe719b5362fe06bbb.pts 02691156/expert_verified/points_label/9ac292686a2fcebbe719b5362fe06bbb.seg 02691156 +04379243/points/3b0c62bde7b24de85ce578b5b4bfae3c.pts 04379243/expert_verified/points_label/3b0c62bde7b24de85ce578b5b4bfae3c.seg 04379243 +02958343/points/c487e9850891e1ec2d15396b7bcc6366.pts 02958343/expert_verified/points_label/c487e9850891e1ec2d15396b7bcc6366.seg 02958343 +03636649/points/b8e25e0825cb5db7765609a3f435fe9d.pts 03636649/expert_verified/points_label/b8e25e0825cb5db7765609a3f435fe9d.seg 03636649 +03001627/points/9fd6bb18dc21c70766ef9dd2f3ef27d3.pts 03001627/expert_verified/points_label/9fd6bb18dc21c70766ef9dd2f3ef27d3.seg 03001627 +02958343/points/bf37249fc8e16fd8f9a88cc63b910f3.pts 02958343/expert_verified/points_label/bf37249fc8e16fd8f9a88cc63b910f3.seg 02958343 +04225987/points/58ae991bd0350810b9ac379f661f5c75.pts 04225987/expert_verified/points_label/58ae991bd0350810b9ac379f661f5c75.seg 04225987 +03001627/points/508306f8ddf1b54c41cc9e8c39b4e399.pts 03001627/expert_verified/points_label/508306f8ddf1b54c41cc9e8c39b4e399.seg 03001627 +03642806/points/ef5b312fc20f1b20aab089a6db538ba7.pts 03642806/expert_verified/points_label/ef5b312fc20f1b20aab089a6db538ba7.seg 03642806 +03001627/points/d97c5945e9449a58737e4e0df09d751.pts 03001627/expert_verified/points_label/d97c5945e9449a58737e4e0df09d751.seg 03001627 +03001627/points/e1897a4391784bc2e8b2b8dc0c816caf.pts 03001627/expert_verified/points_label/e1897a4391784bc2e8b2b8dc0c816caf.seg 03001627 +04379243/points/a624ebf0bf0451a8d93768e7b9b1eabf.pts 04379243/expert_verified/points_label/a624ebf0bf0451a8d93768e7b9b1eabf.seg 04379243 +03636649/points/1e5e1ff56c27c0d2adc5f5aafedb1c38.pts 03636649/expert_verified/points_label/1e5e1ff56c27c0d2adc5f5aafedb1c38.seg 03636649 +03642806/points/2ce3a50ca6087f30d8e007cc6755cce9.pts 03642806/expert_verified/points_label/2ce3a50ca6087f30d8e007cc6755cce9.seg 03642806 +02691156/points/d615a8217b70af06bc0909d98a1ff2b4.pts 02691156/expert_verified/points_label/d615a8217b70af06bc0909d98a1ff2b4.seg 02691156 +02691156/points/6f72a0d86494b551a834b9c8bfc8647a.pts 02691156/expert_verified/points_label/6f72a0d86494b551a834b9c8bfc8647a.seg 02691156 +03001627/points/20fbab2b8770a1cbf51f77a6d7299806.pts 03001627/expert_verified/points_label/20fbab2b8770a1cbf51f77a6d7299806.seg 03001627 +03001627/points/d239d38424429a9a4626612b5d655dc.pts 03001627/expert_verified/points_label/d239d38424429a9a4626612b5d655dc.seg 03001627 +03001627/points/4c97f421c4ea4396d8ac5d7ad0953104.pts 03001627/expert_verified/points_label/4c97f421c4ea4396d8ac5d7ad0953104.seg 03001627 +03001627/points/5b68a6c2baf0ad61d0de9c949c366777.pts 03001627/expert_verified/points_label/5b68a6c2baf0ad61d0de9c949c366777.seg 03001627 +04379243/points/9bd1c242bd66d2fbb63c01786992bd2f.pts 04379243/expert_verified/points_label/9bd1c242bd66d2fbb63c01786992bd2f.seg 04379243 +03001627/points/e2dbe84030167f1ca5aad165050e534c.pts 03001627/expert_verified/points_label/e2dbe84030167f1ca5aad165050e534c.seg 03001627 +03001627/points/1c17cc67b8c747c3febad4f49b26ec52.pts 03001627/expert_verified/points_label/1c17cc67b8c747c3febad4f49b26ec52.seg 03001627 
+04379243/points/2766a883126503cac3bd24f986301745.pts 04379243/expert_verified/points_label/2766a883126503cac3bd24f986301745.seg 04379243 +04225987/points/755dc44dae7791761082f2ea630bf69e.pts 04225987/expert_verified/points_label/755dc44dae7791761082f2ea630bf69e.seg 04225987 +04379243/points/c38ba6c06d2b813230c589758b4b5646.pts 04379243/expert_verified/points_label/c38ba6c06d2b813230c589758b4b5646.seg 04379243 +02691156/points/44c0cb6571f6f000ca8607f540cc62ba.pts 02691156/expert_verified/points_label/44c0cb6571f6f000ca8607f540cc62ba.seg 02691156 +03636649/points/522bc10920249e67141c66e2b49d221.pts 03636649/expert_verified/points_label/522bc10920249e67141c66e2b49d221.seg 03636649 +03790512/points/4548d86cf7f1c11ad373c34785838ee4.pts 03790512/expert_verified/points_label/4548d86cf7f1c11ad373c34785838ee4.seg 03790512 +02958343/points/37c5ac3d5b34761add75f724c0ccbe00.pts 02958343/expert_verified/points_label/37c5ac3d5b34761add75f724c0ccbe00.seg 02958343 +04379243/points/a15f31e2302f6ae5d67a73ffd62ba73f.pts 04379243/expert_verified/points_label/a15f31e2302f6ae5d67a73ffd62ba73f.seg 04379243 +02958343/points/6d714f7b7170a581da8e502a3c6cb4fb.pts 02958343/expert_verified/points_label/6d714f7b7170a581da8e502a3c6cb4fb.seg 02958343 +03624134/points/17c4163247e9237d4b7644126b1d71e0.pts 03624134/expert_verified/points_label/17c4163247e9237d4b7644126b1d71e0.seg 03624134 +03636649/points/7972fd0fe5755b4ad42b9650f19dd425.pts 03636649/expert_verified/points_label/7972fd0fe5755b4ad42b9650f19dd425.seg 03636649 +03001627/points/8ff4ba87d700054546992ce9fde1b2c2.pts 03001627/expert_verified/points_label/8ff4ba87d700054546992ce9fde1b2c2.seg 03001627 +03636649/points/a654df55875a2104d663817442d5278.pts 03636649/expert_verified/points_label/a654df55875a2104d663817442d5278.seg 03636649 +04379243/points/9c12fada31224bdf58c4e7e56d799d97.pts 04379243/expert_verified/points_label/9c12fada31224bdf58c4e7e56d799d97.seg 04379243 +03636649/points/9dad7ce60aa168d72cd2160e449d45ae.pts 03636649/expert_verified/points_label/9dad7ce60aa168d72cd2160e449d45ae.seg 03636649 +02691156/points/cfb555a4d82a600aca8607f540cc62ba.pts 02691156/expert_verified/points_label/cfb555a4d82a600aca8607f540cc62ba.seg 02691156 +04379243/points/415c174ecdc612fb6f5c30e29039b12d.pts 04379243/expert_verified/points_label/415c174ecdc612fb6f5c30e29039b12d.seg 04379243 +03467517/points/a5e2f05386e4ba55a894e1aba5d3799a.pts 03467517/expert_verified/points_label/a5e2f05386e4ba55a894e1aba5d3799a.seg 03467517 +03001627/points/a91b2c89e543a4b3aa3d970c5602cd4a.pts 03001627/expert_verified/points_label/a91b2c89e543a4b3aa3d970c5602cd4a.seg 03001627 +03624134/points/97ed13011e2d85e16029317225a75a9f.pts 03624134/expert_verified/points_label/97ed13011e2d85e16029317225a75a9f.seg 03624134 +04379243/points/388ea3f8ba27da8b777b6246417c94ff.pts 04379243/expert_verified/points_label/388ea3f8ba27da8b777b6246417c94ff.seg 04379243 +04379243/points/983cd9caf65adf1ddf6cfab91d65bb91.pts 04379243/expert_verified/points_label/983cd9caf65adf1ddf6cfab91d65bb91.seg 04379243 +03001627/points/e65d2f0ed75a786a37b2bb75885cfc44.pts 03001627/expert_verified/points_label/e65d2f0ed75a786a37b2bb75885cfc44.seg 03001627 +03624134/points/dce941899bcb752dfe474f09e3f3ac9a.pts 03624134/expert_verified/points_label/dce941899bcb752dfe474f09e3f3ac9a.seg 03624134 +04379243/points/ea3bcd9e6c4205031964126395b17c2a.pts 04379243/expert_verified/points_label/ea3bcd9e6c4205031964126395b17c2a.seg 04379243 +02691156/points/d13d131a649c5df38b96ae1a0a8b84ec.pts 
02691156/expert_verified/points_label/d13d131a649c5df38b96ae1a0a8b84ec.seg 02691156 +04379243/points/f917474a20558aa33bbab77a66bc3671.pts 04379243/expert_verified/points_label/f917474a20558aa33bbab77a66bc3671.seg 04379243 +03001627/points/4a24652fbf2bed7e93583c67df8faf1.pts 03001627/expert_verified/points_label/4a24652fbf2bed7e93583c67df8faf1.seg 03001627 +02691156/points/5dd2324cd6ebf52e293fdbda4e7beec9.pts 02691156/expert_verified/points_label/5dd2324cd6ebf52e293fdbda4e7beec9.seg 02691156 +03642806/points/a59d3d87068d313c2656684d670220c2.pts 03642806/expert_verified/points_label/a59d3d87068d313c2656684d670220c2.seg 03642806 +04379243/points/5354ecb0e3aa1da074a16879fb3ac81f.pts 04379243/expert_verified/points_label/5354ecb0e3aa1da074a16879fb3ac81f.seg 04379243 +03642806/points/6c6a96e4486cc02cda66ecbb2c411f37.pts 03642806/expert_verified/points_label/6c6a96e4486cc02cda66ecbb2c411f37.seg 03642806 +04225987/points/fd3627deb2476b0f1f942c57ac0e8959.pts 04225987/expert_verified/points_label/fd3627deb2476b0f1f942c57ac0e8959.seg 04225987 +04379243/points/91bf48934d3b52ea36658c6705d0c08.pts 04379243/expert_verified/points_label/91bf48934d3b52ea36658c6705d0c08.seg 04379243 +04379243/points/18be1556eb4da5af7ccf848ce05c84be.pts 04379243/expert_verified/points_label/18be1556eb4da5af7ccf848ce05c84be.seg 04379243 +02958343/points/33211aabfefa14603b05c2ad25b4380f.pts 02958343/expert_verified/points_label/33211aabfefa14603b05c2ad25b4380f.seg 02958343 +04379243/points/3243ddb2aa4d1659beb83c64f2162734.pts 04379243/expert_verified/points_label/3243ddb2aa4d1659beb83c64f2162734.seg 04379243 +04379243/points/4ce90fe70faf4c3e255bc16374754e69.pts 04379243/expert_verified/points_label/4ce90fe70faf4c3e255bc16374754e69.seg 04379243 +04379243/points/15be511a2433482aa192483aa282f8e5.pts 04379243/expert_verified/points_label/15be511a2433482aa192483aa282f8e5.seg 04379243 +03624134/points/70b6b3ba6a27fd6f782db73f915dfbb8.pts 03624134/expert_verified/points_label/70b6b3ba6a27fd6f782db73f915dfbb8.seg 03624134 +03001627/points/519d19f3adebd20aba49014d9a3afe99.pts 03001627/expert_verified/points_label/519d19f3adebd20aba49014d9a3afe99.seg 03001627 +03467517/points/ca9720d793355dd693f0194265a9746c.pts 03467517/expert_verified/points_label/ca9720d793355dd693f0194265a9746c.seg 03467517 +03001627/points/e19214cabca496a3f7b54e04c7238d7.pts 03001627/expert_verified/points_label/e19214cabca496a3f7b54e04c7238d7.seg 03001627 +03001627/points/ea1bfe81b88395fcaa29e9f0529e8ef7.pts 03001627/expert_verified/points_label/ea1bfe81b88395fcaa29e9f0529e8ef7.seg 03001627 +03001627/points/2b110b833111b38c420adf24e49f74c8.pts 03001627/expert_verified/points_label/2b110b833111b38c420adf24e49f74c8.seg 03001627 +03001627/points/7b405c1d6d2dbea9f91663a74ccd2338.pts 03001627/expert_verified/points_label/7b405c1d6d2dbea9f91663a74ccd2338.seg 03001627 +02691156/points/489d3e4cc3d790a0ca8607f540cc62ba.pts 02691156/expert_verified/points_label/489d3e4cc3d790a0ca8607f540cc62ba.seg 02691156 +04379243/points/79eeee790ed5a5aac242632b2a8c3129.pts 04379243/expert_verified/points_label/79eeee790ed5a5aac242632b2a8c3129.seg 04379243 +03624134/points/665bf5d30d342d64adee73efb2c043f8.pts 03624134/expert_verified/points_label/665bf5d30d342d64adee73efb2c043f8.seg 03624134 +03467517/points/7f3f5c9953fb7e0a6cbec6f3d994a573.pts 03467517/expert_verified/points_label/7f3f5c9953fb7e0a6cbec6f3d994a573.seg 03467517 +03001627/points/d2597d18fdc3594e1dc59d2adbe5297d.pts 03001627/expert_verified/points_label/d2597d18fdc3594e1dc59d2adbe5297d.seg 03001627 
+03001627/points/a9a1147eae9936f76f1e07a56c129dfc.pts 03001627/expert_verified/points_label/a9a1147eae9936f76f1e07a56c129dfc.seg 03001627 +02691156/points/64cb683afd5e9e559db1d21b460eacef.pts 02691156/expert_verified/points_label/64cb683afd5e9e559db1d21b460eacef.seg 02691156 +03624134/points/e0a78d771cfde145a5cea7e40e4d21ff.pts 03624134/expert_verified/points_label/e0a78d771cfde145a5cea7e40e4d21ff.seg 03624134 +02691156/points/e59c4f290d8585a862b600da24e0965.pts 02691156/expert_verified/points_label/e59c4f290d8585a862b600da24e0965.seg 02691156 +04379243/points/523ac3575244c7f3a130bbab7337a0cf.pts 04379243/expert_verified/points_label/523ac3575244c7f3a130bbab7337a0cf.seg 04379243 +03001627/points/96e83c79e8d76d4519fb4103277a6b93.pts 03001627/expert_verified/points_label/96e83c79e8d76d4519fb4103277a6b93.seg 03001627 +04379243/points/a2781622b5941ff2a886fe6408aa7382.pts 04379243/expert_verified/points_label/a2781622b5941ff2a886fe6408aa7382.seg 04379243 +04379243/points/5d24567426a614ecfd726e98b98fb36f.pts 04379243/expert_verified/points_label/5d24567426a614ecfd726e98b98fb36f.seg 04379243 +03001627/points/a5a2d09e5384237869513d0907f19c8f.pts 03001627/expert_verified/points_label/a5a2d09e5384237869513d0907f19c8f.seg 03001627 +02691156/points/e02485f093835f45c1b64d86df61366a.pts 02691156/expert_verified/points_label/e02485f093835f45c1b64d86df61366a.seg 02691156 +04379243/points/58f8fd169c9578e62f81cb887dc35578.pts 04379243/expert_verified/points_label/58f8fd169c9578e62f81cb887dc35578.seg 04379243 +04379243/points/c755eeaa4a588fcba9126dd5adc92c1e.pts 04379243/expert_verified/points_label/c755eeaa4a588fcba9126dd5adc92c1e.seg 04379243 +03001627/points/704179dd47a2282e676de9b6e111da8b.pts 03001627/expert_verified/points_label/704179dd47a2282e676de9b6e111da8b.seg 03001627 +03001627/points/9253f198c06794cdc7689830acac6e59.pts 03001627/expert_verified/points_label/9253f198c06794cdc7689830acac6e59.seg 03001627 +04379243/points/2ba8eb5ec0a05694593ebeeedbff73b.pts 04379243/expert_verified/points_label/2ba8eb5ec0a05694593ebeeedbff73b.seg 04379243 +03467517/points/133ebdf2ca7bf4b81d4e8021f58beea0.pts 03467517/expert_verified/points_label/133ebdf2ca7bf4b81d4e8021f58beea0.seg 03467517 +03467517/points/ba6d3dcff42ea7bba32c4b8efb0131e.pts 03467517/expert_verified/points_label/ba6d3dcff42ea7bba32c4b8efb0131e.seg 03467517 +03467517/points/222b705a80d75a4343b0b12983b9982.pts 03467517/expert_verified/points_label/222b705a80d75a4343b0b12983b9982.seg 03467517 +04379243/points/47317755c82114d5c3bd24f986301745.pts 04379243/expert_verified/points_label/47317755c82114d5c3bd24f986301745.seg 04379243 +04379243/points/175c0be26d0f2e916cb0bd372b0960ba.pts 04379243/expert_verified/points_label/175c0be26d0f2e916cb0bd372b0960ba.seg 04379243 +03636649/points/19388898dd69dd9fddc8e6d1ec6242c3.pts 03636649/expert_verified/points_label/19388898dd69dd9fddc8e6d1ec6242c3.seg 03636649 +04379243/points/3cec584145ee513d635418e95eea8a17.pts 04379243/expert_verified/points_label/3cec584145ee513d635418e95eea8a17.seg 04379243 +03001627/points/3a5c8d46fdc6793b956abdbfba57903a.pts 03001627/expert_verified/points_label/3a5c8d46fdc6793b956abdbfba57903a.seg 03001627 +03001627/points/3d32d89db2286377e63c6421b71f17c8.pts 03001627/expert_verified/points_label/3d32d89db2286377e63c6421b71f17c8.seg 03001627 +03001627/points/47a45ce9fb219083411e8b42940aba04.pts 03001627/expert_verified/points_label/47a45ce9fb219083411e8b42940aba04.seg 03001627 +03467517/points/214f6a08b78670de2cb522418d5742a0.pts 
03467517/expert_verified/points_label/214f6a08b78670de2cb522418d5742a0.seg 03467517 +04379243/points/1b4bc147baf68d4ff008d8a3590fb522.pts 04379243/expert_verified/points_label/1b4bc147baf68d4ff008d8a3590fb522.seg 04379243 +03467517/points/83b2ecf5caced214e313875ff213ee10.pts 03467517/expert_verified/points_label/83b2ecf5caced214e313875ff213ee10.seg 03467517 +02691156/points/57fe8ad460bcb4929a4a28ef635593ce.pts 02691156/expert_verified/points_label/57fe8ad460bcb4929a4a28ef635593ce.seg 02691156 +03624134/points/e8a6915bd0bcf1bebaa284808a1567a8.pts 03624134/expert_verified/points_label/e8a6915bd0bcf1bebaa284808a1567a8.seg 03624134 +03001627/points/1da29597f89c2b004b3c42e318f3affc.pts 03001627/expert_verified/points_label/1da29597f89c2b004b3c42e318f3affc.seg 03001627 +04379243/points/2ef899e67eecef65190a91fd9a6f7d55.pts 04379243/expert_verified/points_label/2ef899e67eecef65190a91fd9a6f7d55.seg 04379243 +04379243/points/811a7be3be14bd2b62103e4bff47b4cd.pts 04379243/expert_verified/points_label/811a7be3be14bd2b62103e4bff47b4cd.seg 04379243 +03948459/points/592017db407391c68e7e947594effe19.pts 03948459/expert_verified/points_label/592017db407391c68e7e947594effe19.seg 03948459 +03636649/points/eb311e6232cb7011bb5bd941c6665c21.pts 03636649/expert_verified/points_label/eb311e6232cb7011bb5bd941c6665c21.seg 03636649 +02691156/points/caa7e70beee4543f42c20743f866e1a6.pts 02691156/expert_verified/points_label/caa7e70beee4543f42c20743f866e1a6.seg 02691156 +03001627/points/3aaa59b19eebcb5f41552c6ecbda964b.pts 03001627/expert_verified/points_label/3aaa59b19eebcb5f41552c6ecbda964b.seg 03001627 +03001627/points/a93aac9ad86008e69fc01fb65ca37d30.pts 03001627/expert_verified/points_label/a93aac9ad86008e69fc01fb65ca37d30.seg 03001627 +03624134/points/ceeb38ab7929361e76ec14627bf6bbcb.pts 03624134/expert_verified/points_label/ceeb38ab7929361e76ec14627bf6bbcb.seg 03624134 +03001627/points/93dc91115a9002e1663fcfd6703c85f3.pts 03001627/expert_verified/points_label/93dc91115a9002e1663fcfd6703c85f3.seg 03001627 +04379243/points/b08310a1d75702eda09ce9c1262c7237.pts 04379243/expert_verified/points_label/b08310a1d75702eda09ce9c1262c7237.seg 04379243 +03797390/points/e9bd4ee553eb35c1d5ccc40b510e4bd.pts 03797390/expert_verified/points_label/e9bd4ee553eb35c1d5ccc40b510e4bd.seg 03797390 +03001627/points/bdd57499bf64fab6bf80985a99195eb8.pts 03001627/expert_verified/points_label/bdd57499bf64fab6bf80985a99195eb8.seg 03001627 +04379243/points/48af84a5600ad5bc19fb4103277a6b93.pts 04379243/expert_verified/points_label/48af84a5600ad5bc19fb4103277a6b93.seg 04379243 +03001627/points/738395f54b301d80b1f5d603f931c1aa.pts 03001627/expert_verified/points_label/738395f54b301d80b1f5d603f931c1aa.seg 03001627 +03790512/points/6819949f5625ca12d0f568c31c1cd62a.pts 03790512/expert_verified/points_label/6819949f5625ca12d0f568c31c1cd62a.seg 03790512 +03467517/points/70d9a5d0330abd9df4b498e11fb60a4b.pts 03467517/expert_verified/points_label/70d9a5d0330abd9df4b498e11fb60a4b.seg 03467517 +02958343/points/174f1a421f652029d577c0ac53e96823.pts 02958343/expert_verified/points_label/174f1a421f652029d577c0ac53e96823.seg 02958343 +03001627/points/d764960666572084b1ea4e06e88051f3.pts 03001627/expert_verified/points_label/d764960666572084b1ea4e06e88051f3.seg 03001627 +02691156/points/ba662ec78231c493252b4f9439ef95a6.pts 02691156/expert_verified/points_label/ba662ec78231c493252b4f9439ef95a6.seg 02691156 +03636649/points/8a9f2e5b726ea37f60ad823977adaa23.pts 03636649/expert_verified/points_label/8a9f2e5b726ea37f60ad823977adaa23.seg 03636649 
+04379243/points/80af0f92ecf69f69f5ff054d67d5fe35.pts 04379243/expert_verified/points_label/80af0f92ecf69f69f5ff054d67d5fe35.seg 04379243 +04379243/points/ce4e075487aa05ecdcfcef693e7ec696.pts 04379243/expert_verified/points_label/ce4e075487aa05ecdcfcef693e7ec696.seg 04379243 +03001627/points/564f5f96bc718194166420d06689fcf.pts 03001627/expert_verified/points_label/564f5f96bc718194166420d06689fcf.seg 03001627 +03636649/points/88d29e1350eda810c066b9622c005c53.pts 03636649/expert_verified/points_label/88d29e1350eda810c066b9622c005c53.seg 03636649 +04379243/points/346db24c1279e8d273fdbe4b39ff4036.pts 04379243/expert_verified/points_label/346db24c1279e8d273fdbe4b39ff4036.seg 04379243 +04379243/points/7062f5b229674ab7b0b54dd2cf2a35d4.pts 04379243/expert_verified/points_label/7062f5b229674ab7b0b54dd2cf2a35d4.seg 04379243 +03636649/points/923097cec128ae77469cbaa3d6420fb4.pts 03636649/expert_verified/points_label/923097cec128ae77469cbaa3d6420fb4.seg 03636649 +04379243/points/3fb5033b5ddaaf365f7afad12924b3b5.pts 04379243/expert_verified/points_label/3fb5033b5ddaaf365f7afad12924b3b5.seg 04379243 +03636649/points/32e9d8a4b5a141a2615efc34c3b36ef0.pts 03636649/expert_verified/points_label/32e9d8a4b5a141a2615efc34c3b36ef0.seg 03636649 +02691156/points/997cb29f544d6f2726360e1e29a956c7.pts 02691156/expert_verified/points_label/997cb29f544d6f2726360e1e29a956c7.seg 02691156 +04379243/points/7df9115b511668bdde98d10ab5975b59.pts 04379243/expert_verified/points_label/7df9115b511668bdde98d10ab5975b59.seg 04379243 +03636649/points/5580b95ab8e7806c6c5b8009db95f66f.pts 03636649/expert_verified/points_label/5580b95ab8e7806c6c5b8009db95f66f.seg 03636649 +04379243/points/6862bebc1f59a5caac7bed72580dc30f.pts 04379243/expert_verified/points_label/6862bebc1f59a5caac7bed72580dc30f.seg 04379243 +02691156/points/56ba815f883279b462b600da24e0965.pts 02691156/expert_verified/points_label/56ba815f883279b462b600da24e0965.seg 02691156 +03797390/points/5c48d471200d2bf16e8a121e6886e18d.pts 03797390/expert_verified/points_label/5c48d471200d2bf16e8a121e6886e18d.seg 03797390 +04379243/points/b48d04600e7cf2bebeedb4c8fd29e2d1.pts 04379243/expert_verified/points_label/b48d04600e7cf2bebeedb4c8fd29e2d1.seg 04379243 +02958343/points/323c9dc2a8911e146f2f07de403e98d8.pts 02958343/expert_verified/points_label/323c9dc2a8911e146f2f07de403e98d8.seg 02958343 +04225987/points/d3ff56062272f3e6346e65609be6d72f.pts 04225987/expert_verified/points_label/d3ff56062272f3e6346e65609be6d72f.seg 04225987 +03001627/points/af28dbdce6ed8cea19fb4103277a6b93.pts 03001627/expert_verified/points_label/af28dbdce6ed8cea19fb4103277a6b93.seg 03001627 +02958343/points/dfa6c32dec07727ee9d8921ebe6d5b8e.pts 02958343/expert_verified/points_label/dfa6c32dec07727ee9d8921ebe6d5b8e.seg 02958343 +03001627/points/c2b898dd5601454d626d7e3d07da8352.pts 03001627/expert_verified/points_label/c2b898dd5601454d626d7e3d07da8352.seg 03001627 +04379243/points/a7ef45d86ae5b496a97f238e46bc2221.pts 04379243/expert_verified/points_label/a7ef45d86ae5b496a97f238e46bc2221.seg 04379243 +04379243/points/1bd138c3e54a75d32f38c0d2792fb5e.pts 04379243/expert_verified/points_label/1bd138c3e54a75d32f38c0d2792fb5e.seg 04379243 +02958343/points/cd67376cac9f989151008e496c6cfd2e.pts 02958343/expert_verified/points_label/cd67376cac9f989151008e496c6cfd2e.seg 02958343 +03948459/points/af9eaed1d9574387ab2c2809513f396e.pts 03948459/expert_verified/points_label/af9eaed1d9574387ab2c2809513f396e.seg 03948459 +04379243/points/c418195771c7625945821c000807c3b1.pts 
04379243/expert_verified/points_label/c418195771c7625945821c000807c3b1.seg 04379243 +04379243/points/88b227c5fb3906ce47c638c0eee4a2b3.pts 04379243/expert_verified/points_label/88b227c5fb3906ce47c638c0eee4a2b3.seg 04379243 +03467517/points/81bd0c7a35a147988cc3ae4061da3bb0.pts 03467517/expert_verified/points_label/81bd0c7a35a147988cc3ae4061da3bb0.seg 03467517 +04379243/points/5292f2930f188e0a7ff6ace05b36a5.pts 04379243/expert_verified/points_label/5292f2930f188e0a7ff6ace05b36a5.seg 04379243 +03636649/points/5f0a23ce527d0be52f38c0d2792fb5e.pts 03636649/expert_verified/points_label/5f0a23ce527d0be52f38c0d2792fb5e.seg 03636649 +03636649/points/98cdb45ca9925feb194eb328dc97c7e2.pts 03636649/expert_verified/points_label/98cdb45ca9925feb194eb328dc97c7e2.seg 03636649 +03790512/points/47054c1839830834a88e8cb97b773125.pts 03790512/expert_verified/points_label/47054c1839830834a88e8cb97b773125.seg 03790512 +03001627/points/b058cc77e628ac01c433ba3e0e025e8c.pts 03001627/expert_verified/points_label/b058cc77e628ac01c433ba3e0e025e8c.seg 03001627 +04225987/points/f74a5dfc0094e2d5561dce3fe08634b7.pts 04225987/expert_verified/points_label/f74a5dfc0094e2d5561dce3fe08634b7.seg 04225987 +02958343/points/e20b8a9c388eeb012c8b6ee41d7d5d62.pts 02958343/expert_verified/points_label/e20b8a9c388eeb012c8b6ee41d7d5d62.seg 02958343 +02958343/points/7203130a35ab20a4b1bb46d2556ba67d.pts 02958343/expert_verified/points_label/7203130a35ab20a4b1bb46d2556ba67d.seg 02958343 +03261776/points/2c6f04001afcce7ded85c3dc02bada79.pts 03261776/expert_verified/points_label/2c6f04001afcce7ded85c3dc02bada79.seg 03261776 +03001627/points/951fb0d7ad8ab2bec5b5bea66ef4576d.pts 03001627/expert_verified/points_label/951fb0d7ad8ab2bec5b5bea66ef4576d.seg 03001627 +02691156/points/54e926e12382808b66cf1b4a8fc3914e.pts 02691156/expert_verified/points_label/54e926e12382808b66cf1b4a8fc3914e.seg 02691156 +03001627/points/4c513ea0804fc008c8687ff9b0b4e4ac.pts 03001627/expert_verified/points_label/4c513ea0804fc008c8687ff9b0b4e4ac.seg 03001627 +03001627/points/748957972cae6b03c56be62b05937331.pts 03001627/expert_verified/points_label/748957972cae6b03c56be62b05937331.seg 03001627 +03001627/points/cc2639f8c584001a922dfe32810651d0.pts 03001627/expert_verified/points_label/cc2639f8c584001a922dfe32810651d0.seg 03001627 +04379243/points/d2f811bc37858425a63ceecddc308b25.pts 04379243/expert_verified/points_label/d2f811bc37858425a63ceecddc308b25.seg 04379243 +03001627/points/d48dac046436a29ec3bd24f986301745.pts 03001627/expert_verified/points_label/d48dac046436a29ec3bd24f986301745.seg 03001627 +03001627/points/30fafef5c734f926781ba0fdb47276df.pts 03001627/expert_verified/points_label/30fafef5c734f926781ba0fdb47276df.seg 03001627 +03001627/points/7293291b3fe8233fdef1c01cbd4ae0c.pts 03001627/expert_verified/points_label/7293291b3fe8233fdef1c01cbd4ae0c.seg 03001627 +03636649/points/3deedc86a83bbf23f647dc544bb0ab61.pts 03636649/expert_verified/points_label/3deedc86a83bbf23f647dc544bb0ab61.seg 03636649 +03467517/points/bb4a5712da8f63330d758421dd01f45.pts 03467517/expert_verified/points_label/bb4a5712da8f63330d758421dd01f45.seg 03467517 +03636649/points/39af776c1435a3374b59758e9336ca87.pts 03636649/expert_verified/points_label/39af776c1435a3374b59758e9336ca87.seg 03636649 +04379243/points/ef9f3af9b8453613febad4f49b26ec52.pts 04379243/expert_verified/points_label/ef9f3af9b8453613febad4f49b26ec52.seg 04379243 +02691156/points/29192f8c96264e3435fc197bbabcd5bd.pts 02691156/expert_verified/points_label/29192f8c96264e3435fc197bbabcd5bd.seg 02691156 
+02691156/points/75d162523d703917b87697d3904b168b.pts 02691156/expert_verified/points_label/75d162523d703917b87697d3904b168b.seg 02691156 +04379243/points/3c04f4e0d183976a7e7cb173e141227.pts 04379243/expert_verified/points_label/3c04f4e0d183976a7e7cb173e141227.seg 04379243 +03790512/points/80011e85cd42668ad373c34785838ee4.pts 03790512/expert_verified/points_label/80011e85cd42668ad373c34785838ee4.seg 03790512 +04379243/points/994e524d70043c3496e349c87c588bf2.pts 04379243/expert_verified/points_label/994e524d70043c3496e349c87c588bf2.seg 04379243 +02691156/points/b1f08c51a098c43696d224195a988f09.pts 02691156/expert_verified/points_label/b1f08c51a098c43696d224195a988f09.seg 02691156 +04379243/points/cb31b6293506eb639a3528690d225ee1.pts 04379243/expert_verified/points_label/cb31b6293506eb639a3528690d225ee1.seg 04379243 +02691156/points/d70d648947c65b1eca8607f540cc62ba.pts 02691156/expert_verified/points_label/d70d648947c65b1eca8607f540cc62ba.seg 02691156 +03636649/points/7bebdd742342ba93febad4f49b26ec52.pts 03636649/expert_verified/points_label/7bebdd742342ba93febad4f49b26ec52.seg 03636649 +02691156/points/2a2caad9e540dcc687bf26680c510802.pts 02691156/expert_verified/points_label/2a2caad9e540dcc687bf26680c510802.seg 02691156 +03790512/points/73fd19410ce60b83d5dde04c96fd8146.pts 03790512/expert_verified/points_label/73fd19410ce60b83d5dde04c96fd8146.seg 03790512 +04379243/points/ccb8c52ff9e7a01819fb4103277a6b93.pts 04379243/expert_verified/points_label/ccb8c52ff9e7a01819fb4103277a6b93.seg 04379243 +03467517/points/cc9e9ef3e1326c5363e148e250c0340d.pts 03467517/expert_verified/points_label/cc9e9ef3e1326c5363e148e250c0340d.seg 03467517 +03001627/points/d5360f2b0b0299c29b9f2eb77f5e247e.pts 03001627/expert_verified/points_label/d5360f2b0b0299c29b9f2eb77f5e247e.seg 03001627 +02691156/points/6b69e4c1cceb6e0681fa1ee3c368532e.pts 02691156/expert_verified/points_label/6b69e4c1cceb6e0681fa1ee3c368532e.seg 02691156 +02691156/points/3ae96a1e1bb488942296d88107d065f6.pts 02691156/expert_verified/points_label/3ae96a1e1bb488942296d88107d065f6.seg 02691156 +04379243/points/5e4351c4525fae6d6fa63795f94c4d8c.pts 04379243/expert_verified/points_label/5e4351c4525fae6d6fa63795f94c4d8c.seg 04379243 +04225987/points/5c55e6b6708f730d758f6def7204bd6b.pts 04225987/expert_verified/points_label/5c55e6b6708f730d758f6def7204bd6b.seg 04225987 +03001627/points/a48e359faed3da88d3519c62a8100783.pts 03001627/expert_verified/points_label/a48e359faed3da88d3519c62a8100783.seg 03001627 +03467517/points/a4170135b1055cb8982c503992eaf09.pts 03467517/expert_verified/points_label/a4170135b1055cb8982c503992eaf09.seg 03467517 +02958343/points/b3f1ad55fa401c35e8c505ac322336cc.pts 02958343/expert_verified/points_label/b3f1ad55fa401c35e8c505ac322336cc.seg 02958343 +02691156/points/c7c5bb658cafcc7c67711f7c205c5b63.pts 02691156/expert_verified/points_label/c7c5bb658cafcc7c67711f7c205c5b63.seg 02691156 +02691156/points/914c308ac4a9156842c20743f866e1a6.pts 02691156/expert_verified/points_label/914c308ac4a9156842c20743f866e1a6.seg 02691156 +04379243/points/23acbe1f91d445f91ca1c7e576bee6b9.pts 04379243/expert_verified/points_label/23acbe1f91d445f91ca1c7e576bee6b9.seg 04379243 +04379243/points/8eb366f4f602219b490ad276cd2af3a4.pts 04379243/expert_verified/points_label/8eb366f4f602219b490ad276cd2af3a4.seg 04379243 +03624134/points/508ca8fa00e0cbb3e168961dc7b88f65.pts 03624134/expert_verified/points_label/508ca8fa00e0cbb3e168961dc7b88f65.seg 03624134 +04379243/points/be045fca16562f6764c85287e21825c4.pts 
04379243/expert_verified/points_label/be045fca16562f6764c85287e21825c4.seg 04379243 +03001627/points/70f57047512c2eb84104b1c5cb7f9280.pts 03001627/expert_verified/points_label/70f57047512c2eb84104b1c5cb7f9280.seg 03001627 +03001627/points/975ea4be01c7488611bc8e8361bc5303.pts 03001627/expert_verified/points_label/975ea4be01c7488611bc8e8361bc5303.seg 03001627 +04379243/points/3c7cf00cd78adaef4b3c42e318f3affc.pts 04379243/expert_verified/points_label/3c7cf00cd78adaef4b3c42e318f3affc.seg 04379243 +02773838/points/220f08ff0c1d2a4542282fc88db7886b.pts 02773838/expert_verified/points_label/220f08ff0c1d2a4542282fc88db7886b.seg 02773838 +03636649/points/e35c4fadbf8d0426c26e81144f3196d5.pts 03636649/expert_verified/points_label/e35c4fadbf8d0426c26e81144f3196d5.seg 03636649 +03642806/points/93958423b98be8b538ff1b6d120c56aa.pts 03642806/expert_verified/points_label/93958423b98be8b538ff1b6d120c56aa.seg 03642806 +04379243/points/cf24f0128755080569080f7eaa8f3e1d.pts 04379243/expert_verified/points_label/cf24f0128755080569080f7eaa8f3e1d.seg 04379243 +04379243/points/f5cbbe04afdc4697562b835b63cfd09c.pts 04379243/expert_verified/points_label/f5cbbe04afdc4697562b835b63cfd09c.seg 04379243 +04379243/points/7a7590d19cf8274dab610b0c94236463.pts 04379243/expert_verified/points_label/7a7590d19cf8274dab610b0c94236463.seg 04379243 +03001627/points/bdfc3a43eccaac7e908cb3a44391b80.pts 03001627/expert_verified/points_label/bdfc3a43eccaac7e908cb3a44391b80.seg 03001627 +03636649/points/90d70f0a6b1cf72d79f0be73913de469.pts 03636649/expert_verified/points_label/90d70f0a6b1cf72d79f0be73913de469.seg 03636649 +03642806/points/17069b6604fc28bfa2f5beb253216d5b.pts 03642806/expert_verified/points_label/17069b6604fc28bfa2f5beb253216d5b.seg 03642806 +04379243/points/3b0625a3d623a7decfbec6fc6446a041.pts 04379243/expert_verified/points_label/3b0625a3d623a7decfbec6fc6446a041.seg 04379243 +04379243/points/9482c5f0a38a73c0fa16d3c3138134ae.pts 04379243/expert_verified/points_label/9482c5f0a38a73c0fa16d3c3138134ae.seg 04379243 +04379243/points/ed73c41dcfe9170119cc3eaf35cd388f.pts 04379243/expert_verified/points_label/ed73c41dcfe9170119cc3eaf35cd388f.seg 04379243 +04379243/points/1abed35643d34f60afed86cbd9fd5335.pts 04379243/expert_verified/points_label/1abed35643d34f60afed86cbd9fd5335.seg 04379243 +03001627/points/98e1936d3f25389bc3c6a889ee0bd115.pts 03001627/expert_verified/points_label/98e1936d3f25389bc3c6a889ee0bd115.seg 03001627 +03797390/points/ef24c302911bcde6ea6ff2182dd34668.pts 03797390/expert_verified/points_label/ef24c302911bcde6ea6ff2182dd34668.seg 03797390 +02773838/points/22b7d6fa819d62aefc69b7db9c6d5ad9.pts 02773838/expert_verified/points_label/22b7d6fa819d62aefc69b7db9c6d5ad9.seg 02773838 +03001627/points/19666f52289092a3394a3bbfc81460.pts 03001627/expert_verified/points_label/19666f52289092a3394a3bbfc81460.seg 03001627 +03001627/points/49b38e22f104005ecbde89e0c48a01bf.pts 03001627/expert_verified/points_label/49b38e22f104005ecbde89e0c48a01bf.seg 03001627 +04379243/points/de077e0bd6932baef12d7184a2ad3430.pts 04379243/expert_verified/points_label/de077e0bd6932baef12d7184a2ad3430.seg 04379243 +03001627/points/fe99f16c2532cdd07ba99ad16fdc05cd.pts 03001627/expert_verified/points_label/fe99f16c2532cdd07ba99ad16fdc05cd.seg 03001627 +03642806/points/a17cf326705a6443a09a37cf78d1b866.pts 03642806/expert_verified/points_label/a17cf326705a6443a09a37cf78d1b866.seg 03642806 +04379243/points/890940359fdfa036569c11df1aea8ca4.pts 04379243/expert_verified/points_label/890940359fdfa036569c11df1aea8ca4.seg 04379243 
+03642806/points/7f75b94bd59d649958dd315c54df0c15.pts 03642806/expert_verified/points_label/7f75b94bd59d649958dd315c54df0c15.seg 03642806 +04379243/points/d0ef9d431a16e70de6c5cd45aa112726.pts 04379243/expert_verified/points_label/d0ef9d431a16e70de6c5cd45aa112726.seg 04379243 +03001627/points/2dc5055b8d900ec7db4b0ee93cf61ed1.pts 03001627/expert_verified/points_label/2dc5055b8d900ec7db4b0ee93cf61ed1.seg 03001627 +03001627/points/9e6b834449ed2db86199d6fe090be061.pts 03001627/expert_verified/points_label/9e6b834449ed2db86199d6fe090be061.seg 03001627 +04379243/points/9e3f1901ea14aca753315facdf531a34.pts 04379243/expert_verified/points_label/9e3f1901ea14aca753315facdf531a34.seg 04379243 +03001627/points/c4ebef05a72fc4f39d62eb3fdc2d3f8a.pts 03001627/expert_verified/points_label/c4ebef05a72fc4f39d62eb3fdc2d3f8a.seg 03001627 +03001627/points/428b77d0ffe6ab456e06155d245f15d6.pts 03001627/expert_verified/points_label/428b77d0ffe6ab456e06155d245f15d6.seg 03001627 +04225987/points/591971ce679ca4b93ad38b993d9e745f.pts 04225987/expert_verified/points_label/591971ce679ca4b93ad38b993d9e745f.seg 04225987 +03790512/points/bcabe20e46e5126ed5dde04c96fd8146.pts 03790512/expert_verified/points_label/bcabe20e46e5126ed5dde04c96fd8146.seg 03790512 +04379243/points/3ed500a12dfa511ba6040757a0125a99.pts 04379243/expert_verified/points_label/3ed500a12dfa511ba6040757a0125a99.seg 04379243 +04379243/points/1581d2682187764730bbd4cddd04c77b.pts 04379243/expert_verified/points_label/1581d2682187764730bbd4cddd04c77b.seg 04379243 +02691156/points/bb7d526405e9347b8f6810e1a2b6aa04.pts 02691156/expert_verified/points_label/bb7d526405e9347b8f6810e1a2b6aa04.seg 02691156 +02691156/points/fb9deec3a422b06b609e2d916fa0da27.pts 02691156/expert_verified/points_label/fb9deec3a422b06b609e2d916fa0da27.seg 02691156 +03636649/points/5e6abfc7d93fa5f1dc0efee4b442070.pts 03636649/expert_verified/points_label/5e6abfc7d93fa5f1dc0efee4b442070.seg 03636649 +03467517/points/2dbc73ad4ce7950163e148e250c0340d.pts 03467517/expert_verified/points_label/2dbc73ad4ce7950163e148e250c0340d.seg 03467517 +02958343/points/eea7f5d02088d49dfdb3c05088c091ae.pts 02958343/expert_verified/points_label/eea7f5d02088d49dfdb3c05088c091ae.seg 02958343 +04379243/points/83c24aad3914e61a73376642dd664bfd.pts 04379243/expert_verified/points_label/83c24aad3914e61a73376642dd664bfd.seg 04379243 +04379243/points/51874066ba946c58aaf15b62af6b513f.pts 04379243/expert_verified/points_label/51874066ba946c58aaf15b62af6b513f.seg 04379243 +03636649/points/5be8cdad3b218e373d39d8012919dd25.pts 03636649/expert_verified/points_label/5be8cdad3b218e373d39d8012919dd25.seg 03636649 +03636649/points/49cd0dd4d1c008edbbc7a6acbd8f058b.pts 03636649/expert_verified/points_label/49cd0dd4d1c008edbbc7a6acbd8f058b.seg 03636649 +03642806/points/d7e7e6651a23afc68ba4e518219eb66a.pts 03642806/expert_verified/points_label/d7e7e6651a23afc68ba4e518219eb66a.seg 03642806 +02958343/points/6026684ab31d567328044fe9244db50a.pts 02958343/expert_verified/points_label/6026684ab31d567328044fe9244db50a.seg 02958343 +04379243/points/c177762c0445d57ab20aa91e9e90c311.pts 04379243/expert_verified/points_label/c177762c0445d57ab20aa91e9e90c311.seg 04379243 +02691156/points/7bad9d15c0f0d3c03554ccf8c30febe7.pts 02691156/expert_verified/points_label/7bad9d15c0f0d3c03554ccf8c30febe7.seg 02691156 +03636649/points/dd818b0269b1aa15fcb8d8c6d4df8143.pts 03636649/expert_verified/points_label/dd818b0269b1aa15fcb8d8c6d4df8143.seg 03636649 +03624134/points/c4851aee1af7d874cc34b900bb2492e.pts 
03624134/expert_verified/points_label/c4851aee1af7d874cc34b900bb2492e.seg 03624134 +03001627/points/e2ced471afce616454bfa32aa0766acb.pts 03001627/expert_verified/points_label/e2ced471afce616454bfa32aa0766acb.seg 03001627 +03797390/points/896f1d494bac0ebcdec712af445786fe.pts 03797390/expert_verified/points_label/896f1d494bac0ebcdec712af445786fe.seg 03797390 +04379243/points/481e00e4559705c616a2b5862518c93.pts 04379243/expert_verified/points_label/481e00e4559705c616a2b5862518c93.seg 04379243 +04379243/points/2ca883ba6a9dc6f68985be89a0ee21a.pts 04379243/expert_verified/points_label/2ca883ba6a9dc6f68985be89a0ee21a.seg 04379243 +04379243/points/ebc82e7df36f6e9a33963916b86d221f.pts 04379243/expert_verified/points_label/ebc82e7df36f6e9a33963916b86d221f.seg 04379243 +03001627/points/cdea84a63ad8c44febad4f49b26ec52.pts 03001627/expert_verified/points_label/cdea84a63ad8c44febad4f49b26ec52.seg 03001627 +03624134/points/c71280ea272fbfed4b7644126b1d71e0.pts 03624134/expert_verified/points_label/c71280ea272fbfed4b7644126b1d71e0.seg 03624134 +02958343/points/974c3d82f8726f086b418c7d9fedcaa9.pts 02958343/expert_verified/points_label/974c3d82f8726f086b418c7d9fedcaa9.seg 02958343 +02958343/points/4dbf4e0654d0c234e811106a82796d20.pts 02958343/expert_verified/points_label/4dbf4e0654d0c234e811106a82796d20.seg 02958343 +03467517/points/de9ca0c3e32f907dcb61cf5d9c47c2c7.pts 03467517/expert_verified/points_label/de9ca0c3e32f907dcb61cf5d9c47c2c7.seg 03467517 +02958343/points/9f4bbcf9f51fe1e42957c02bdefc95c8.pts 02958343/expert_verified/points_label/9f4bbcf9f51fe1e42957c02bdefc95c8.seg 02958343 +03467517/points/173e4f1824f7b9fa93f0194265a9746c.pts 03467517/expert_verified/points_label/173e4f1824f7b9fa93f0194265a9746c.seg 03467517 +03636649/points/b4f166440439171741657e31b569b105.pts 03636649/expert_verified/points_label/b4f166440439171741657e31b569b105.seg 03636649 +03948459/points/d1ba405fef56efa0fa29682ba98e856d.pts 03948459/expert_verified/points_label/d1ba405fef56efa0fa29682ba98e856d.seg 03948459 +03467517/points/a39dcefa599a76dd93f0194265a9746c.pts 03467517/expert_verified/points_label/a39dcefa599a76dd93f0194265a9746c.seg 03467517 +02958343/points/e213d976734431773a3afd30f2e86bd7.pts 02958343/expert_verified/points_label/e213d976734431773a3afd30f2e86bd7.seg 02958343 +04379243/points/b1335d826d7d60726e066e11deddab75.pts 04379243/expert_verified/points_label/b1335d826d7d60726e066e11deddab75.seg 04379243 +04379243/points/e37262abd76852ac00ee852f6d8aa3c.pts 04379243/expert_verified/points_label/e37262abd76852ac00ee852f6d8aa3c.seg 04379243 +03001627/points/5d346bdb7db27accf3588493d5c284.pts 03001627/expert_verified/points_label/5d346bdb7db27accf3588493d5c284.seg 03001627 +04379243/points/198ff59a42a147eb8ac5948d70801389.pts 04379243/expert_verified/points_label/198ff59a42a147eb8ac5948d70801389.seg 04379243 +03001627/points/b3fd987b330d0d2acda56795a6fbde1f.pts 03001627/expert_verified/points_label/b3fd987b330d0d2acda56795a6fbde1f.seg 03001627 +02691156/points/1cb757280b862ae52c7575c9089791ff.pts 02691156/expert_verified/points_label/1cb757280b862ae52c7575c9089791ff.seg 02691156 +03636649/points/4631e756666a8a208ca4aeb5e3b33af7.pts 03636649/expert_verified/points_label/4631e756666a8a208ca4aeb5e3b33af7.seg 03636649 +04379243/points/b82c6769c98e877d24d29f1dedd03a57.pts 04379243/expert_verified/points_label/b82c6769c98e877d24d29f1dedd03a57.seg 04379243 +03636649/points/2b194d6bed8daa82c0b2dda5ff15ea28.pts 03636649/expert_verified/points_label/2b194d6bed8daa82c0b2dda5ff15ea28.seg 03636649 
+03001627/points/7e6b4a7b4dd60c40cc8bd7a04c9659f1.pts 03001627/expert_verified/points_label/7e6b4a7b4dd60c40cc8bd7a04c9659f1.seg 03001627 +03948459/points/d1cc54762432fd058a2c998c0df41abe.pts 03948459/expert_verified/points_label/d1cc54762432fd058a2c998c0df41abe.seg 03948459 +04225987/points/776eaffd7cbe7bc6b9e8bdc9c4a49aa2.pts 04225987/expert_verified/points_label/776eaffd7cbe7bc6b9e8bdc9c4a49aa2.seg 04225987 +04379243/points/6ce30b0327db26f340b4c5428883e585.pts 04379243/expert_verified/points_label/6ce30b0327db26f340b4c5428883e585.seg 04379243 +04379243/points/c5230678204a1bb8dcfcef693e7ec696.pts 04379243/expert_verified/points_label/c5230678204a1bb8dcfcef693e7ec696.seg 04379243 +02691156/points/563cef4df464ddb1e153dd90dac45a6d.pts 02691156/expert_verified/points_label/563cef4df464ddb1e153dd90dac45a6d.seg 02691156 +02958343/points/42e6ce03b361102ab86e0633bb69faea.pts 02958343/expert_verified/points_label/42e6ce03b361102ab86e0633bb69faea.seg 02958343 +03001627/points/26e8033e59a3adf6bb53a6a5f5051240.pts 03001627/expert_verified/points_label/26e8033e59a3adf6bb53a6a5f5051240.seg 03001627 +04379243/points/731b983cb313634fd018082a1777a5f8.pts 04379243/expert_verified/points_label/731b983cb313634fd018082a1777a5f8.seg 04379243 +02691156/points/10aa040f470500c6a66ef8df4909ded9.pts 02691156/expert_verified/points_label/10aa040f470500c6a66ef8df4909ded9.seg 02691156 +03467517/points/bb895a87931f51c893f0194265a9746c.pts 03467517/expert_verified/points_label/bb895a87931f51c893f0194265a9746c.seg 03467517 +03624134/points/a105080ce4564145aeb54153795ede63.pts 03624134/expert_verified/points_label/a105080ce4564145aeb54153795ede63.seg 03624134 +04379243/points/c12147db9b29ef9ee0480c954dcd56d1.pts 04379243/expert_verified/points_label/c12147db9b29ef9ee0480c954dcd56d1.seg 04379243 +04379243/points/21cdc417e398378e40f3ac0af6b7e700.pts 04379243/expert_verified/points_label/21cdc417e398378e40f3ac0af6b7e700.seg 04379243 +04379243/points/b11e0feb428f61edf008d8a3590fb522.pts 04379243/expert_verified/points_label/b11e0feb428f61edf008d8a3590fb522.seg 04379243 +04379243/points/2700f6693447c32d66c64744a4252d3.pts 04379243/expert_verified/points_label/2700f6693447c32d66c64744a4252d3.seg 04379243 +03467517/points/b6d0cf333c7e013993f0194265a9746c.pts 03467517/expert_verified/points_label/b6d0cf333c7e013993f0194265a9746c.seg 03467517 +03001627/points/ece627bd883d9bbfb0eb7e753c06942.pts 03001627/expert_verified/points_label/ece627bd883d9bbfb0eb7e753c06942.seg 03001627 +03636649/points/26f0f37f0f2623c4a3fa46ae73c48b4.pts 03636649/expert_verified/points_label/26f0f37f0f2623c4a3fa46ae73c48b4.seg 03636649 +04379243/points/8b07d458499d63f36d96c6cb347d6a90.pts 04379243/expert_verified/points_label/8b07d458499d63f36d96c6cb347d6a90.seg 04379243 +04379243/points/eb363770ee36b0309a79b01b89f55c86.pts 04379243/expert_verified/points_label/eb363770ee36b0309a79b01b89f55c86.seg 04379243 +04379243/points/ccf36a20b7ef3bd128071d61462a212d.pts 04379243/expert_verified/points_label/ccf36a20b7ef3bd128071d61462a212d.seg 04379243 +03001627/points/cf24fc2d10f8da31283b00891f680579.pts 03001627/expert_verified/points_label/cf24fc2d10f8da31283b00891f680579.seg 03001627 +02958343/points/8b4879617bd256391738f25e3015f92e.pts 02958343/expert_verified/points_label/8b4879617bd256391738f25e3015f92e.seg 02958343 +03001627/points/55e1cde05a99f6c7d1d34366ca81fb3b.pts 03001627/expert_verified/points_label/55e1cde05a99f6c7d1d34366ca81fb3b.seg 03001627 +03001627/points/6c25ec1178e9bab6e545858398955dd1.pts 
03001627/expert_verified/points_label/6c25ec1178e9bab6e545858398955dd1.seg 03001627 +03001627/points/862f70e73fa70c9b1a719e2a845bdada.pts 03001627/expert_verified/points_label/862f70e73fa70c9b1a719e2a845bdada.seg 03001627 +04379243/points/fa5dce1043f44c06ab88e3acae6e8bc5.pts 04379243/expert_verified/points_label/fa5dce1043f44c06ab88e3acae6e8bc5.seg 04379243 +03467517/points/6f9d1467eb39f8abfae47f572c17b9cb.pts 03467517/expert_verified/points_label/6f9d1467eb39f8abfae47f572c17b9cb.seg 03467517 +04379243/points/60ef2830979fd08ec72d4ae978770752.pts 04379243/expert_verified/points_label/60ef2830979fd08ec72d4ae978770752.seg 04379243 +03624134/points/d69e028056c9291069654277b747a908.pts 03624134/expert_verified/points_label/d69e028056c9291069654277b747a908.seg 03624134 +04379243/points/8e7c894039ae2cfe99e8bf807e902261.pts 04379243/expert_verified/points_label/8e7c894039ae2cfe99e8bf807e902261.seg 04379243 +02958343/points/4e2ca20091449636599389919f6522e6.pts 02958343/expert_verified/points_label/4e2ca20091449636599389919f6522e6.seg 02958343 +04379243/points/b10d84b3a04085b17618b16b281bdf56.pts 04379243/expert_verified/points_label/b10d84b3a04085b17618b16b281bdf56.seg 04379243 +03948459/points/d13986cc2403a2034b4b3d2a28039009.pts 03948459/expert_verified/points_label/d13986cc2403a2034b4b3d2a28039009.seg 03948459 +03636649/points/d97a86cea650ae0baf5b49ad7809302.pts 03636649/expert_verified/points_label/d97a86cea650ae0baf5b49ad7809302.seg 03636649 +03797390/points/ca198dc3f7dc0cacec6338171298c66b.pts 03797390/expert_verified/points_label/ca198dc3f7dc0cacec6338171298c66b.seg 03797390 +03636649/points/3f968096c74ee3a3b04a2e6a78ff6c49.pts 03636649/expert_verified/points_label/3f968096c74ee3a3b04a2e6a78ff6c49.seg 03636649 +02691156/points/4d6ec762d1583ded46555ee25941a22e.pts 02691156/expert_verified/points_label/4d6ec762d1583ded46555ee25941a22e.seg 02691156 +03467517/points/401ff6021157dee293f0194265a9746c.pts 03467517/expert_verified/points_label/401ff6021157dee293f0194265a9746c.seg 03467517 +04379243/points/c1d808c75cc5e7ab4da5bb83ec125010.pts 04379243/expert_verified/points_label/c1d808c75cc5e7ab4da5bb83ec125010.seg 04379243 +03790512/points/3d37db1d974499287395d58407f193ba.pts 03790512/expert_verified/points_label/3d37db1d974499287395d58407f193ba.seg 03790512 +03624134/points/65892e0f7f93129d14cb807a24b99e1e.pts 03624134/expert_verified/points_label/65892e0f7f93129d14cb807a24b99e1e.seg 03624134 +03624134/points/854e7bb73afaff7591ea3afb2749822f.pts 03624134/expert_verified/points_label/854e7bb73afaff7591ea3afb2749822f.seg 03624134 +03624134/points/7b492f2baa1dc710cc34b900bb2492e.pts 03624134/expert_verified/points_label/7b492f2baa1dc710cc34b900bb2492e.seg 03624134 +03636649/points/b4b15a84b9067f94a75d03186a0409e2.pts 03636649/expert_verified/points_label/b4b15a84b9067f94a75d03186a0409e2.seg 03636649 +03636649/points/9db87bf898efd448cbde89e0c48a01bf.pts 03636649/expert_verified/points_label/9db87bf898efd448cbde89e0c48a01bf.seg 03636649 +02954340/points/9bd54e0123d3cd70a52821bf1aa3b19a.pts 02954340/expert_verified/points_label/9bd54e0123d3cd70a52821bf1aa3b19a.seg 02954340 -- Gitee From 5d4cb67710466348b0eeac9a354af2589dcd2f5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:09:44 +0000 Subject: [PATCH 29/54] =?UTF-8?q?AttRec=5FID2630=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
.../AttRec_ID2630_for_TensorFlow2.X/LICENSE | 21 ++ .../AttRec_ID2630_for_TensorFlow2.X/README.md | 208 ++++++++++++++++++ .../evaluate.py | 99 +++++++++ .../AttRec_ID2630_for_TensorFlow2.X/model.py | 162 ++++++++++++++ .../modelzoo_level.txt | 3 + .../modules.py | 92 ++++++++ .../requirements.txt | 0 .../AttRec_ID2630_for_TensorFlow2.X/run_1p.sh | 3 + .../test/train_full_1p.sh | 189 ++++++++++++++++ .../test/train_performance_1p.sh | 189 ++++++++++++++++ .../test/train_performance_1p_static.sh | 193 ++++++++++++++++ .../AttRec_ID2630_for_TensorFlow2.X/train.py | 208 ++++++++++++++++++ .../AttRec_ID2630_for_TensorFlow2.X/utils.py | 125 +++++++++++ 13 files changed, 1492 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py create mode 100644 TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..51d555a15 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Ke YU + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..f052dd27c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/README.md
@@ -0,0 +1,208 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Recommendation**
+
+**Version: 1.1**
+
+**Modified: 2022.4.11**
+
+**Size: 44KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code for a long- and short-term user interest representation model, based on the TensorFlow framework**
+
+<h2 id="概述.md">Overview</h2>
+
+## Summary
+
+The model uses a self-attention mechanism to predict item-item relations from the user's historical behavior. Self-attention estimates a relevance weight for each item along the user's behavior trajectory, yielding a better short-term user interest representation. The model is trained in a metric learning framework and considers long- and short-term intent jointly (a worked form of the score appears at the end of this section).
+
+- Reference paper:
+
+    https://arxiv.org/abs/1808.06414
+
+- Reference implementation:
+
+    https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/AttRec
+
+- Implementation adapted to the Ascend AI Processor:
+
+    skip
+
+- To fetch the code at the corresponding commit_id via Git:
+
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's code repository
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # enter the model code path; skip if the repository contains only this model
+
+
+## Default Configuration
+
+- Network structure:
+  - 29 layers
+  - 1404700 total params
+
+- Training hyperparameters (single card):
+  - Batch size: 512
+  - Train epochs: 20
+  - Learning rate: 0.001
+  - Trans score: 1
+  - Max len: 5
+  - Embed dim: 100
+  - Embed reg: 1e-6
+  - Gamma: 0.5
+  - Mode: 'inner'
+  - w: 0.5
+  - K: 10
+
+
+## Supported Features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed Precision Training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following built-in optimization strategies, it automatically lowers selected float32 operators across the network to float16, improving system performance and reducing memory usage with very little loss of precision.
+
+## Enabling Mixed Precision
+
+Pass --precision_mode='allow_mix_precision' when launching the script.
+
+```
+ ./train_performance_1p.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+```
+
+Related code example:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+                    help='allow_fp32_to_fp16/force_fp16/ '
+                    'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
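+As a sketch of the scoring (read directly off model.py later in this patch; here $U_u$ is the user embedding, $m_u$ the self-attentive short-term interest, and $V_i$, $X_i$ the two item embeddings), mode='inner' combines long- and short-term scores as
+
+```math
+s(u, i) = w \, \langle U_u, V_i \rangle + (1 - w) \, \langle m_u, X_i \rangle
+```
+
+while mode='dist' uses squared Euclidean distances with a pairwise hinge loss over (positive, negative) pairs:
+
+```math
+y(u, i) = w \, \lVert U_u - V_i \rVert_2^2 + (1 - w) \, \lVert m_u - X_i \rVert_2^2, \qquad \mathcal{L} = \sum \max\bigl(0,\; y_{u,pos} - y_{u,neg} + \gamma\bigr)
+```
+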

+<h2 id="训练环境准备.md">Training Environment Setup</h2>

+
+- For the hardware environment and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset Preparation
+
+1. Prepare the dataset yourself. The dataset used by this network is the ml-1m dataset.
+
+The dataset directory is laid out as follows:
+
+```
+├──ml_1m
+│    ├──movies.dat
+│    ├──ratings.dat
+│    ├──README
+│    ├──users.dat
+```
+
+
+
+## Model Training
+
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables for the run.
+
+       For the environment variable settings, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-card training
+
+       2.1 Single-card training command (the script is at AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh). First cd into the test directory, then launch training with the command below. Make sure "--data_path" in the example below is changed to your own data path; here the data folder is placed under the home directory.
+
+        bash train_full_1p.sh --data_path=/home
+
+
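+The actual preprocessing lives in utils.py (not shown in this excerpt). As a minimal sketch of the file that --data_path must point at: ml-1m's ratings.dat uses "::" as its field separator, so it can be read roughly as follows (the column names are illustrative, not the repo's API):
+
+```python
+import pandas as pd
+
+# ml-1m ratings.dat lines look like: UserID::MovieID::Rating::Timestamp
+ratings = pd.read_csv(
+    "/home/ml_1m/ratings.dat",
+    sep="::",
+    engine="python",  # the multi-character separator "::" needs the python engine
+    names=["user_id", "item_id", "rating", "timestamp"],
+)
+
+# behavior sequences are then built per user in timestamp order
+ratings = ratings.sort_values(["user_id", "timestamp"])
+print(ratings.head())
+```
+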

+<h2 id="高级参考.md">Advanced Reference</h2>

+
+## Scripts and Sample Code
+
+```
+|--LICENSE
+|--README.md                             #documentation
+|--evaluate.py
+|--model.py
+|--modules.py
+|--train.py                              #training code
+|--requirements.txt                      #required dependencies
+|--run_1p.sh
+|--utils.py
+|--test                                  #training script directory
+|    |--train_full_1p.sh                 #full training script
+|    |--train_performance_1p.sh          #performance training script
+```
+
+## Script Parameters
+
+```
+--data_path              # the path to train data
+--epochs                 # epochs of training
+--ckpt_save_path         # directory to ckpt
+--batch_size             # batch size for 1p
+--log_steps              # log frequency
+--precision_mode         # precision mode (allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+--over_dump              # if or not over detection, default is False
+--data_dump_flag         # data dump flag, default is False
+--data_dump_step         # data dump step, default is 10
+--profiling              # if or not profiling for performance debug, default is False
+--profiling_dump_path    # the path to save profiling data
+--over_dump_path         # the path to save over dump data
+--data_dump_path         # the path to save dump data
+--use_mixlist            # use_mixlist flag, default is False
+--fusion_off_flag        # fusion_off flag, default is False
+--mixlist_file           # mixlist file name, default is ops_info.json
+--fusion_off_file        # fusion_off file name, default is fusion_switch.cfg
+```
+
+## Training Process
+
+Launch training with the training command in "Model Training"; this model provides single-card (1p) scripts. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training log and checkpoint files. The loss information is written to the file ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py
new file mode 100644
index 000000000..b8c843e86
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/evaluate.py
@@ -0,0 +1,99 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 13, 2020
+
+evaluate model
+
+@author: Ziyao Geng
+"""
+import pandas as pd
+import numpy as np
+
+
+def getHit(df):
+    """
+    calculate hit rate
+    :return:
+    """
+    if sum(df['pred']) < _K:
+        return 1
+    else:
+        return 0
+
+
+def getNDCG(df):
+    """
+    calculate NDCG
+    :return:
+    """
+    if sum(df['pred']) < _K:
+        return 1 / np.log(sum(df['pred']) + 2)
+    else:
+        return 0.
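+
+# In this evaluation protocol each user contributes one group of rows, and
+# r = sum(df['pred']) counts the sampled negatives scored at least as well as
+# the positive item, i.e. the positive item's zero-based rank. The helpers
+# above and below therefore compute, per user:
+#   HR@K   : 1 if r < K else 0
+#   NDCG@K : 1 / log(r + 2) if r < K else 0
+#   MRR    : 1 / (r + 1)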
+
+
+def getMRR(df):
+    """
+    calculate MRR
+    :return:
+    """
+    return 1 / (sum(df['pred']) + 1)
+
+
+def evaluate_model(model, test, K):
+    """
+    evaluate model
+    :param model: model
+    :param test: test set
+    :param K: top K
+    :return: hit rate, ndcg, mrr
+    """
+    global _K
+    _K = K
+    test_X = test
+    # predict
+    pos_score, neg_score = model.predict(test_X)
+    # create dataframe
+    test_df = pd.DataFrame(test_X[0], columns=['user_id'])
+    # if mode == 'inner', a higher score is better, so pred = 1 when the
+    # negative item scores at least as well as the positive item
+    if model.mode == 'inner':
+        test_df['pred'] = (pos_score <= neg_score).astype(np.int32)
+    else:
+        test_df['pred'] = (pos_score >= neg_score).astype(np.int32)
+    # groupby
+    tg = test_df.groupby('user_id')
+    # calculate hit
+    hit_rate = tg.apply(getHit).mean()
+    # calculate ndcg
+    ndcg = tg.apply(getNDCG).mean()
+    # calculate mrr
+    mrr = tg.apply(getMRR).mean()
+    return hit_rate, ndcg, mrr
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py
new file mode 100644
index 000000000..e56471cf7
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/model.py
@@ -0,0 +1,162 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on Nov 10, 2020
+
+model: Next Item Recommendation with Self-Attentive Metric Learning
+
+@author: Ziyao Geng
+"""
+
+import tensorflow as tf
+
+from tensorflow.keras import Model
+from tensorflow.keras.layers import Embedding, Input
+from tensorflow.keras.regularizers import l2
+
+from modules import *
+
+
+class AttRec(Model):
+    def __init__(self, feature_columns, maxlen=40, mode='inner', gamma=0.5, w=0.5, embed_reg=1e-6, **kwargs):
+        """
+        AttRec
+        :param feature_columns: A feature columns list. user + seq
+        :param maxlen: A scalar. In the paper, maxlen is L, the number of latest items.
+        :param mode: A string. inner or dist.
+        :param gamma: A scalar. if mode == 'dist', gamma is the margin.
+        :param w: A scalar. The weight of short interest.
+        :param embed_reg: A scalar. The regularizer of embedding.
+ """ + super(AttRec, self).__init__(**kwargs) + # maxlen + self.maxlen = maxlen + # w + self.w = w + self.gamma = gamma + self.mode = mode + # feature columns + self.user_fea_col, self.item_fea_col = feature_columns + # embed_dim + self.embed_dim = self.item_fea_col['embed_dim'] + # user embedding + self.user_embedding = Embedding(input_dim=self.user_fea_col['feat_num'], + input_length=1, + output_dim=self.user_fea_col['embed_dim'], + mask_zero=False, + embeddings_initializer='random_normal', + embeddings_regularizer=l2(embed_reg)) + # item embedding + self.item_embedding = Embedding(input_dim=self.item_fea_col['feat_num'], + input_length=1, + output_dim=self.item_fea_col['embed_dim'], + mask_zero=True, + embeddings_initializer='random_normal', + embeddings_regularizer=l2(embed_reg)) + # item2 embedding, not share embedding + self.item2_embedding = Embedding(input_dim=self.item_fea_col['feat_num'], + input_length=1, + output_dim=self.item_fea_col['embed_dim'], + mask_zero=True, + embeddings_initializer='random_normal', + embeddings_regularizer=l2(embed_reg)) + # self-attention + self.self_attention = SelfAttention_Layer() + + def call(self, inputs, **kwargs): + # input + user_inputs, seq_inputs, pos_inputs, neg_inputs = inputs + # mask + # mask = self.item_embedding.compute_mask(seq_inputs) + mask = tf.cast(tf.not_equal(seq_inputs, 0), dtype=tf.float32) # (None, maxlen) + # user info + user_embed = self.user_embedding(tf.squeeze(user_inputs, axis=-1)) # (None, dim) + # seq info + seq_embed = self.item_embedding(seq_inputs) # (None, maxlen, dim) + # item + pos_embed = self.item_embedding(tf.squeeze(pos_inputs, axis=-1)) # (None, dim) + neg_embed = self.item_embedding(tf.squeeze(neg_inputs, axis=-1)) # (None, dim) + # item2 embed + pos_embed2 = self.item2_embedding(tf.squeeze(pos_inputs, axis=-1)) # (None, dim) + neg_embed2 = self.item2_embedding(tf.squeeze(neg_inputs, axis=-1)) # (None, dim) + + # short-term interest + short_interest = self.self_attention([seq_embed, seq_embed, seq_embed, mask]) # (None, dim) + + # mode + if self.mode == 'inner': + # long-term interest, pos and neg + pos_long_interest = tf.multiply(user_embed, pos_embed2) + neg_long_interest = tf.multiply(user_embed, neg_embed2) + # combine + pos_scores = self.w * tf.reduce_sum(pos_long_interest, axis=-1, keepdims=True) \ + + (1 - self.w) * tf.reduce_sum(tf.multiply(short_interest, pos_embed), axis=-1, keepdims=True) + neg_scores = self.w * tf.reduce_sum(neg_long_interest, axis=-1, keepdims=True) \ + + (1 - self.w) * tf.reduce_sum(tf.multiply(short_interest, neg_embed), axis=-1, keepdims=True) + self.add_loss(tf.reduce_mean(-tf.math.log(tf.nn.sigmoid(pos_scores - neg_scores)))) + else: + # clip by norm + user_embed = tf.clip_by_norm(user_embed, 1, -1) + pos_embed = tf.clip_by_norm(pos_embed, 1, -1) + neg_embed = tf.clip_by_norm(neg_embed, 1, -1) + pos_embed2 = tf.clip_by_norm(pos_embed2, 1, -1) + neg_embed2 = tf.clip_by_norm(neg_embed2, 1, -1) + # distance + # long-term interest, pos and neg + pos_long_interest = tf.square(user_embed - pos_embed2) # (None, dim) + neg_long_interest = tf.square(user_embed - neg_embed2) # (None, dim) + # combine. Here is a difference from the original paper. 
+ pos_scores = self.w * tf.reduce_sum(pos_long_interest, axis=-1, keepdims=True) + \ + (1 - self.w) * tf.reduce_sum(tf.square(short_interest - pos_embed), axis=-1, keepdims=True) + neg_scores = self.w * tf.reduce_sum(neg_long_interest, axis=-1, keepdims=True) + \ + (1 - self.w) * tf.reduce_sum(tf.square(short_interest - neg_embed), axis=-1, keepdims=True) + # minimize loss + # self.add_loss(tf.reduce_sum(tf.maximum(pos_scores - neg_scores + self.gamma, 0))) + self.add_loss(tf.reduce_sum(tf.nn.relu(pos_scores - neg_scores + self.gamma))) + return pos_scores, neg_scores + + def summary(self): + seq_inputs = Input(shape=(self.maxlen,), dtype=tf.int32) + user_inputs = Input(shape=(1, ), dtype=tf.int32) + pos_inputs = Input(shape=(1, ), dtype=tf.int32) + neg_inputs = Input(shape=(1, ), dtype=tf.int32) + Model(inputs=[user_inputs, seq_inputs, pos_inputs, neg_inputs], + outputs=self.call([user_inputs, seq_inputs, pos_inputs, neg_inputs])).summary() + + +def test_model(): + user_features = {'feat': 'user_id', 'feat_num': 100, 'embed_dim': 8} + seq_features = {'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8} + features = [user_features, seq_features] + model = AttRec(features, mode='dist') + model.summary() + + +# test_model() \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py new file mode 100644 index 000000000..c325b2f25 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/modules.py @@ -0,0 +1,92 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +""" +Created on Nov 10, 2020 + +modules of AttRec: self-attention mechanism + +@author: Ziyao Geng +""" + +import tensorflow as tf +import numpy as np +import math + +from tensorflow.keras.layers import Layer, Dense +from tensorflow.keras.losses import Loss + + +class SelfAttention_Layer(Layer): + def __init__(self): + super(SelfAttention_Layer, self).__init__() + + def build(self, input_shape): + self.dim = input_shape[0][-1] + self.W = self.add_weight(shape=[self.dim, self.dim], name='weight', + initializer='random_uniform') + + def call(self, inputs, **kwargs): + q, k, v, mask = inputs + # pos encoding + k += self.positional_encoding(k) + q += self.positional_encoding(q) + # Nonlinear transformation + q = tf.nn.relu(tf.matmul(q, self.W)) # (None, seq_len, dim) + k = tf.nn.relu(tf.matmul(k, self.W)) # (None, seq_len, dim) + mat_qk = tf.matmul(q, k, transpose_b=True) # (None, seq_len, seq_len) + dk = tf.cast(self.dim, dtype=tf.float32) + # Scaled + scaled_att_logits = mat_qk / tf.sqrt(dk) + # Mask + mask = tf.tile(tf.expand_dims(mask, 1), [1, q.shape[1], 1]) # (None, seq_len, seq_len) + paddings = tf.ones_like(scaled_att_logits) * (-2 ** 32 + 1) + outputs = tf.where(tf.equal(mask, 0), paddings, scaled_att_logits) # (None, seq_len, seq_len) + # softmax + outputs = tf.nn.softmax(logits=outputs, axis=-1) # (None, seq_len, seq_len) + # output + outputs = tf.matmul(outputs, v) # (None, seq_len, dim) + outputs = tf.reduce_mean(outputs, axis=1) # (None, dim) + return outputs + + @staticmethod + def get_angles(pos, i, d_model): + angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model)) + return pos * angle_rates + + def positional_encoding(self, QK_input): + angle_rads = self.get_angles(np.arange(QK_input.shape[1])[:, np.newaxis], + np.arange(self.dim)[np.newaxis, :], self.dim) + angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2]) + angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2]) + pos_encoding = angle_rads[np.newaxis, ...] + + return tf.cast(pos_encoding, dtype=tf.float32) + + diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh new file mode 100644 index 000000000..837e2c7d2 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/run_1p.sh @@ -0,0 +1,3 @@ +cur_path='pwd' +python3 ${cur_path}/train.py --epochs=40 --data_path=. 
--batch_size=1024 --ckpt_save_path="" --precision_mode="" > loss+perf_gpu.txt 2>&1 + diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..3146a4489 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,189 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 +export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="AttRec_ID2630_for_TensorFlow2.X" +#训练epoch +train_epochs=20 +#训练batch_size +batch_size=512 + + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p 
${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --log_steps=1919\ + --ckpt_save_path="" \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1` +wait +FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}' | tail -n 1` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep NDCG $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'|tr -cd "[0-9]*\.[0-9]"` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..2df0ed262 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,189 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="AttRec_ID2630_for_TensorFlow2.X" +#训练epoch +train_epochs=10 +#训练batch_size +batch_size=512 + + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --log_steps=960 \ + --ckpt_save_path="" \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + 
--use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1` +wait +FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}' | tail -n 1` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep NDCG $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'|tr -cd "[0-9]*\.[0-9]"` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..de4d8a089 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,193 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 +export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="AttRec_ID2630_for_TensorFlow2.X" +#训练epoch +train_epochs=10 +#训练batch_size +batch_size=512 + + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi 
+data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --log_steps=960 \ + --ckpt_save_path="" \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --static=1> ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +Step=`grep val_loss 
$cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'` +Time=`grep val_loss $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +3 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'` +TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'` + +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep NDCG $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'|tr -cd "[0-9]*\.[0-9]"` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..1ba29affb --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/train.py @@ -0,0 +1,208 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on Nov 11, 2020 + +train AttRec model + +@author: Ziyao Geng +""" + +import npu_device + +import os +import ast +import numpy as np +import pandas as pd +import tensorflow as tf +from time import time +from tensorflow.keras.optimizers import Adam + +from model import AttRec +from modules import * +from evaluate import * +from utils import * + +import argparse + +def parse_args(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default='', help="""directory to data""") + parser.add_argument('--ckpt_save_path', default='', help="""directory to ckpt""") + parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""") + parser.add_argument('--epochs', default=3, type=int, help="""epochs""") + parser.add_argument('--log_steps', default=1, type=int, help="""log frequency""") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--static', default=0, type=int, help="""static""") + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + + +args = parse_args() +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + 
npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode=args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + npu_device.open().as_default() + +npu_config() + +class LossHistory(tf.keras.callbacks.Callback): + def __init__(self, bs): + super().__init__() + self.batch_size = bs + def on_batch_begin(self, batch, logs={}): + self.start = time() + def on_batch_end(self, batch, logs={}): + if batch % args.log_steps == 0: + loss = logs.get('loss') + dura = time() - self.start + if dura < 10: + self.epoch_perf.append(dura) + print('step:%d ,loss: %f ,time:%f'%(batch, loss, dura), flush=True) + def on_epoch_begin(self, epoch, logs={}): + self.epoch_perf = [] + self.epochstart = time() + def on_epoch_end(self, epoch, logs={}): + duration = time() - self.epochstart + print('epoch_duration: ', duration) + self.perf.append(np.mean(self.epoch_perf)) + def on_train_begin(self, logs={}): + print('params: ', self.params) + self.perf = [] + def on_train_end(self, logs={}): + print('imgs/s: %.2f'%(self.batch_size / np.mean(self.perf))) + + +if __name__ == '__main__': + + # ========================= Hyper Parameters ======================= + file = 'ratings.dat' + file = os.path.join(args.data_path, file) + print(file) + trans_score = 1 + maxlen = 5 + + embed_dim = 100 + embed_reg = 1e-6 # 1e-6 + gamma = 0.5 + mode = 'inner' # 'inner' or 'dist' + w = 0.5 + K = 10 + + learning_rate = 0.001 + epochs = args.epochs + batch_size = args.batch_size + # ========================== Create dataset ======================= + feature_columns, train, val, test = create_implicit_ml_1m_dataset(file, trans_score, embed_dim, maxlen) + if args.static==1: + print('=====================[DEBUG]======================',flush=True) + train_X = [np.array(train[0][:982016],dtype='int32'),np.array(train[1][:982016],dtype='int32'),np.array(train[2][:982016],dtype='int32'),np.array(train[3][:982016],dtype='int32')] + val_X = [np.array(val[0][:5632],dtype='int32'),np.array(val[1][:5632],dtype='int32'),np.array(val[2][:5632],dtype='int32'),np.array(val[3][:5632],dtype='int32')] + print(train_X[0].shape,train_X[1].shape,train_X[2].shape,train_X[3].shape,flush=True) + print(val_X[0].shape,val_X[1].shape,val_X[2].shape,val_X[3].shape,flush=True) + + #train_X = train[:491520] + #val_X = val[:491520] + else: + train_X = train + val_X = val + # ============================Build Model========================== + model = AttRec(feature_columns, maxlen, mode, gamma, w, embed_reg) + model.summary() + logger = LossHistory(batch_size) + # =========================Compile============================ + model.compile(optimizer=Adam(learning_rate=learning_rate)) + + results = [] + for epoch in range(1, epochs + 1): + # ===========================Fit============================== + t1 = time() + model.fit( + train_X, + None, + validation_data=(val_X, 
None), + epochs=1, + # callbacks=[tensorboard, checkpoint], + callbacks=logger, + batch_size=batch_size, + verbose=2 + ) + save_ckpt = os.path.join(args.ckpt_save_path, "checkpoint/tf_model") + #model.save_weights(filepath=save_ckpt, save_format="tf") + # ===========================Test============================== + t2 = time() + if epoch % 5 == 0: + hit_rate, ndcg, mrr = evaluate_model(model, test, K) + print('Iteration %d Fit [%.1f s], Evaluate [%.1f s]: HR = %.4f, NDCG = %.4f, MRR = %.4f' + % (epoch, t2 - t1, time() - t2, hit_rate, ndcg, mrr)) + results.append([epoch, t2 - t1, time() - t2, hit_rate, ndcg, mrr]) + # ========================== Write Log =========================== + pd.DataFrame(results, columns=['Iteration', 'fit_time', 'evaluate_time', + 'hit_rate', 'ndcg', 'mrr']).to_csv( + 'log/AttRec_log_maxlen_{}_dim_{}_K_{}_w_{}.csv'.format(maxlen, embed_dim, K, w), index=False) diff --git a/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py new file mode 100644 index 000000000..1f6f445ef --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AttRec_ID2630_for_TensorFlow2.X/utils.py @@ -0,0 +1,125 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on Nov 10, 2020 + +create implicit ml-1m dataset(update, delete dense_inputs, sparse_inputs) + +This dataset is for AttRec model use. + +@author: Ziyao Geng +""" +import pandas as pd +import numpy as np +import random +from tqdm import tqdm +from tensorflow.keras.preprocessing.sequence import pad_sequences + + +def sparseFeature(feat, feat_num, embed_dim=4): + """ + create dictionary for sparse feature + :param feat: feature name + :param feat_num: the total number of sparse features that do not repeat + :param embed_dim: embedding dimension + :return: + """ + return {'feat': feat, 'feat_num': feat_num, 'embed_dim': embed_dim} + + +def create_implicit_ml_1m_dataset(file, trans_score=2, embed_dim=8, maxlen=40): + """ + :param file: A string. dataset path. + :param trans_score: A scalar. Greater than it is 1, and less than it is 0. + :param embed_dim: A scalar. latent factor. + :param maxlen: A scalar. maxlen. 
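+    Split per user (as implemented below): the last item, paired with the
+    remaining sampled negatives, goes to the test set; the second-to-last
+    item to the validation set; all earlier items to the training set.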
+ :return: user_num, item_num, train_df, test_df + """ + print('==========Data Preprocess Start============') + data_df = pd.read_csv(file, sep="::", engine='python', + names=['user_id', 'item_id', 'label', 'Timestamp']) + # implicit dataset + data_df = data_df[data_df.label >= trans_score] + + # sort + data_df = data_df.sort_values(by=['user_id', 'Timestamp']) + + train_data, val_data, test_data = [], [], [] + + item_id_max = data_df['item_id'].max() + for user_id, df in tqdm(data_df[['user_id', 'item_id']].groupby('user_id')): + pos_list = df['item_id'].tolist() + + def gen_neg(): + neg = pos_list[0] + while neg in pos_list: + neg = random.randint(1, item_id_max) + return neg + + neg_list = [gen_neg() for i in range(len(pos_list) + 100)] + for i in range(1, len(pos_list)): + hist_i = pos_list[:i] + if i == len(pos_list) - 1: + for neg in neg_list[i:]: + test_data.append([user_id, hist_i, pos_list[i], neg]) + elif i == len(pos_list) - 2: + val_data.append([user_id, hist_i, pos_list[i], neg_list[i]]) + else: + train_data.append([user_id, hist_i, pos_list[i], neg_list[i]]) + + # feature columns + user_num, item_num = data_df['user_id'].max() + 1, data_df['item_id'].max() + 1 + feature_columns = [sparseFeature('user_id', user_num, embed_dim), + sparseFeature('item_id', item_num, embed_dim)] + + # shuffle + random.shuffle(train_data) + random.shuffle(val_data) + random.shuffle(test_data) + + # create dataframe + train = pd.DataFrame(train_data, columns=['user_id', 'hist', 'pos_item', 'neg_item']) + val = pd.DataFrame(val_data, columns=['user_id', 'hist', 'pos_item', 'neg_item']) + test = pd.DataFrame(test_data, columns=['user_id', 'hist', 'pos_item', 'neg_item']) + print('==================Padding===================') + + # create dataset + def df_to_list(data): + return [data['user_id'].values, pad_sequences(data['hist'], maxlen=maxlen), + data['pos_item'].values, data['neg_item'].values] + + train_X = df_to_list(train) + val_X = df_to_list(val) + test_X = df_to_list(test) + print('============Data Preprocess End=============') + return feature_columns, train_X, val_X, test_X + + +# create_implicit_ml_1m_dataset('../dataset/ml-1m/ratings.dat', maxlen=5) \ No newline at end of file -- Gitee From 529953db6590c3b60085efc9eb96821d61a98a69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:10:36 +0000 Subject: [PATCH 30/54] =?UTF-8?q?AUTOAUGMENT=5FID2891=5Ffor=5FTensorFlow2.?= =?UTF-8?q?X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 21 ++ .../README.md | 180 +++++++++++ .../auto_augment.py | 271 ++++++++++++++++ .../cosine_annealing.py | 57 ++++ .../dataset.py | 128 ++++++++ .../example.png | Bin 0 -> 119083 bytes .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../test/train_full_1p.sh | 114 +++++++ .../test/train_performance_1p.sh | 181 +++++++++++ .../test/train_performance_1p_static.sh | 183 +++++++++++ .../train.py | 302 ++++++++++++++++++ .../utils.py | 42 +++ .../wide_resnet.py | 101 ++++++ 14 files changed, 1583 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py create mode 100644 
TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/train.py create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/utils.py create mode 100644 TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/wide_resnet.py diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..8697e3ca0 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Takato Kimura + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..1b89b26bd --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/README.md @@ -0,0 +1,180 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +

+<h2 id="基本信息.md">基本信息</h2>

+
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Image Classification**
+
+**版本(Version):1.1**
+
+**修改时间(Modified):2021.10.01**
+
+**大小(Size):324KB**
+
+**框架(Framework):TensorFlow 2.4.1**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Benchmark**
+
+**描述(Description):基于TensorFlow框架的cv和模式识别网络训练代码**
+
+

+<h2 id="概述.md">概述</h2>

+
+
+- AutoAugment通过搜索得到的数据增强策略组合(ShearX/Y、Rotate、Equalize、Cutout等)提升图像分类网络的精度。本样例基于Keras实现,在CIFAR-10数据集上训练Wide-ResNet并应用AutoAugment增强策略。
+
+- 参考论文:
+
+    [https://arxiv.org/abs/1805.09501](https://arxiv.org/abs/1805.09501)
+
+- 参考实现:
+
+    [https://github.com/4uiiurz1/keras-auto-augment](https://github.com/4uiiurz1/keras-auto-augment)
+
+- 适配昇腾 AI 处理器的实现:
+
+    skip
+
+- 通过Git获取对应commit\_id的代码方法如下:
+
+    ```
+    git clone {repository_url}        # 克隆仓库的代码
+    cd {repository_name}              # 切换到模型的代码仓目录
+    git checkout {branch}             # 切换到对应分支
+    git reset --hard {commit_id}      # 代码设置到对应的commit_id
+    cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+    ```
+
+## 默认配置
+- 网络结构
+    - Wide-ResNet(见wide_resnet.py)
+
+- 训练超参(单卡):
+    - Batch size: 128
+    - Train epochs:200
+
+
+## 支持特性
+
+| 特性列表   | 是否支持 |
+| ---------- | -------- |
+| 分布式训练 | 否       |
+| 混合精度   | 是       |
+| 数据并行   | 否       |
+
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+config_proto = tf.ConfigProto(allow_soft_placement=True)
+custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = 'NpuOptimizer'
+custom_op.parameter_map["use_off_line"].b = True
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+session_config = npu_config_proto(config_proto=config_proto)
+```
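+
+上述为TF1.X风格的session配置。本仓库中的TF2.X脚本一般通过npu_device全局选项开启混合精度(以下仅为示意片段,具体请以各样例train.py中的npu_config()实现为准):
+
+```
+import npu_device
+
+# 开启自动混合精度
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# 可选:通过混合精度黑白名单文件微调参与降精度的算子
+# npu_device.global_options().modify_mixlist = "ops_info.json"
+npu_device.open().as_default()
+```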

+<h2 id="训练环境准备.md">训练环境准备</h2>

+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt文件位于模型的根目录
+
+

+<h2 id="快速上手.md">快速上手</h2>

+ +## 数据集准备 + +1. 数据集请用户自行获取。 + +## 模型训练 +- 单击“立即下载”,并选择合适的下载方式下载源码包。 +- 开始训练。 + + 1. 启动训练之前,首先要配置程序运行相关环境变量。 + + 环境变量配置信息参见: + + [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819) + + + 2. 单卡训练 + + 2.1 设置单卡训练参数(脚本位于AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。 + + + ``` + batch_size=128 + #训练step + train_epochs=200 + ``` + + 2.2 单卡训练指令(脚本位于AUTOAUGMENT_ID2891_for_TensorFlow2.X/test) + + ``` + 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡 + bash train_full_1p.sh --data_path=xx + 数据集应有如下结构(数据切分可能不同) + | + ├─cifar-10-batches-py.tar.gz + + + ``` + +
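+
+    训练中的数据增强由dataset.py中的Cifar10ImageDataGenerator完成。以下用法示意基于dataset.py自带的main()函数,cutout与auto_augment开关为示意的命令行参数,具体以utils.py中的flag定义为准:
+
+    ```
+    import argparse
+    from keras.datasets import cifar10
+
+    from dataset import Cifar10ImageDataGenerator
+    from utils import str2bool
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cutout', default=True, type=str2bool)
+    parser.add_argument('--auto-augment', default=True, type=str2bool)
+    args = parser.parse_args()
+
+    datagen = Cifar10ImageDataGenerator(args)
+    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+    # flow()在标准化之前按策略施加cutout与AutoAugment增强
+    for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=128):
+        break
+    ```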

+<h2 id="迁移学习指导.md">迁移学习指导</h2>

+ +- 数据集准备。 + + 1. 获取数据。 + 请参见“快速上手”中的数据集准备 + +- 模型训练 + + 请参考“快速上手”章节 + +

+<h2 id="高级参考.md">高级参考</h2>

+ +## 脚本和示例代码 + + ├── README.md //说明文档 + ├── requirements.txt //依赖 + ├── train.py //主脚本 + ├── utils.py + ├── auto+augment.py + ├── cosine_annealing.py + ├── dataset.py + ├── wide_resnet.py + ├── test + | |—— train_full_1p.sh //单卡训练脚本 + | |—— train_performance_1p.sh //单卡训练脚本 + +## 脚本参数 + +``` +batch_size 训练batch_size +train_epochs 总训练epoch数 +其余参数请在utils.py中配置flag默认值 +``` + +## 训练过程 + +通过“模型训练”中的训练指令启动单卡训练。 +将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。 \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py new file mode 100644 index 000000000..a45f34461 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/auto_augment.py @@ -0,0 +1,271 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import random +import numpy as np +import scipy +from scipy import ndimage +from PIL import Image, ImageEnhance, ImageOps + + +operations = { + 'ShearX': lambda img, magnitude: shear_x(img, magnitude), + 'ShearY': lambda img, magnitude: shear_y(img, magnitude), + 'TranslateX': lambda img, magnitude: translate_x(img, magnitude), + 'TranslateY': lambda img, magnitude: translate_y(img, magnitude), + 'Rotate': lambda img, magnitude: rotate(img, magnitude), + 'AutoContrast': lambda img, magnitude: auto_contrast(img, magnitude), + 'Invert': lambda img, magnitude: invert(img, magnitude), + 'Equalize': lambda img, magnitude: equalize(img, magnitude), + 'Solarize': lambda img, magnitude: solarize(img, magnitude), + 'Posterize': lambda img, magnitude: posterize(img, magnitude), + 'Contrast': lambda img, magnitude: contrast(img, magnitude), + 'Color': lambda img, magnitude: color(img, magnitude), + 'Brightness': lambda img, magnitude: brightness(img, magnitude), + 'Sharpness': lambda img, magnitude: sharpness(img, magnitude), + 'Cutout': lambda img, magnitude: cutout(img, magnitude), +} + + +def apply_policy(img, policy): + if random.random() < policy[1]: + img = operations[policy[0]](img, policy[2]) + if random.random() < policy[4]: + img = operations[policy[3]](img, policy[5]) + + return img + + +def transform_matrix_offset_center(matrix, x, y): + o_x = float(x) / 2 + 0.5 + o_y = float(y) / 2 + 0.5 + offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) + reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) + transform_matrix = offset_matrix @ matrix @ reset_matrix + return transform_matrix + + +def shear_x(img, magnitude): + magnitudes = np.linspace(-0.3, 0.3, 11) + + transform_matrix = np.array([[1, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 0], + [0, 1, 0], + [0, 0, 1]]) + transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) + affine_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + img = np.stack([ndimage.interpolation.affine_transform( + img[:, :, c], + affine_matrix, + offset) for c in range(img.shape[2])], axis=2) + return img + + +def shear_y(img, magnitude): + magnitudes = np.linspace(-0.3, 0.3, 11) + + transform_matrix = np.array([[1, 0, 0], + [random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 1, 0], + [0, 0, 1]]) + transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) + affine_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + img = np.stack([ndimage.interpolation.affine_transform( + img[:, :, c], + affine_matrix, + offset) for c in range(img.shape[2])], axis=2) + return img + + +def translate_x(img, magnitude): + magnitudes = np.linspace(-150/331, 150/331, 11) + + transform_matrix = np.array([[1, 0, 0], + [0, 1, img.shape[1]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])], + [0, 0, 1]]) + transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) + affine_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + img = np.stack([ndimage.interpolation.affine_transform( + img[:, :, c], + affine_matrix, + offset) for c in range(img.shape[2])], axis=2) + return img + + +def translate_y(img, magnitude): + magnitudes = np.linspace(-150/331, 150/331, 11) + + transform_matrix = np.array([[1, 0, img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])], + [0, 1, 0], + [0, 0, 1]]) + transform_matrix = 
transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) + affine_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + img = np.stack([ndimage.interpolation.affine_transform( + img[:, :, c], + affine_matrix, + offset) for c in range(img.shape[2])], axis=2) + return img + + +def rotate(img, magnitude): + magnitudes = np.linspace(-30, 30, 11) + + theta = np.deg2rad(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])) + transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 1]]) + transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1]) + affine_matrix = transform_matrix[:2, :2] + offset = transform_matrix[:2, 2] + img = np.stack([ndimage.interpolation.affine_transform( + img[:, :, c], + affine_matrix, + offset) for c in range(img.shape[2])], axis=2) + return img + + +def auto_contrast(img, magnitude): + img = Image.fromarray(img) + img = ImageOps.autocontrast(img) + img = np.array(img) + return img + + +def invert(img, magnitude): + img = Image.fromarray(img) + img = ImageOps.invert(img) + img = np.array(img) + return img + + +def equalize(img, magnitude): + img = Image.fromarray(img) + img = ImageOps.equalize(img) + img = np.array(img) + return img + + +def solarize(img, magnitude): + magnitudes = np.linspace(0, 256, 11) + + img = Image.fromarray(img) + img = ImageOps.solarize(img, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])) + img = np.array(img) + return img + + +def posterize(img, magnitude): + magnitudes = np.linspace(4, 8, 11) + + img = Image.fromarray(img) + img = ImageOps.posterize(img, int(round(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])))) + img = np.array(img) + return img + + +def contrast(img, magnitude): + magnitudes = np.linspace(0.1, 1.9, 11) + + img = Image.fromarray(img) + img = ImageEnhance.Contrast(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])) + img = np.array(img) + return img + + +def color(img, magnitude): + magnitudes = np.linspace(0.1, 1.9, 11) + + img = Image.fromarray(img) + img = ImageEnhance.Color(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])) + img = np.array(img) + return img + + +def brightness(img, magnitude): + magnitudes = np.linspace(0.1, 1.9, 11) + + img = Image.fromarray(img) + img = ImageEnhance.Brightness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])) + img = np.array(img) + return img + + +def sharpness(img, magnitude): + magnitudes = np.linspace(0.1, 1.9, 11) + + img = Image.fromarray(img) + img = ImageEnhance.Sharpness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])) + img = np.array(img) + return img + + +def cutout(org_img, magnitude=None): + magnitudes = np.linspace(0, 60/331, 11) + + img = np.copy(org_img) + mask_val = img.mean() + + if magnitude is None: + mask_size = 16 + else: + mask_size = int(round(img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))) + top = np.random.randint(0 - mask_size//2, img.shape[0] - mask_size) + left = np.random.randint(0 - mask_size//2, img.shape[1] - mask_size) + bottom = top + mask_size + right = left + mask_size + + if top < 0: + top = 0 + if left < 0: + left = 0 + + img[top:bottom, left:right, :].fill(mask_val) + + return img + + +def main(): + import matplotlib.pyplot as plt + from keras.datasets import cifar10 + + (x_train, y_train), (x_test, y_test) = cifar10.load_data() + + img = 
x_train[0] + for key, op in zip(operations.keys(), operations.values()): + print(key) + dst = op(img, random.randint(0, 9)) + plt.imshow(dst) + plt.axis('off') + plt.show() + + +if __name__ == '__main__': + main() diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py new file mode 100644 index 000000000..dec897f1b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/cosine_annealing.py @@ -0,0 +1,57 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import math +from keras.callbacks import Callback +from keras import backend as K + + +class CosineAnnealingScheduler(Callback): + """Cosine annealing scheduler. + """ + + def __init__(self, T_max, eta_max, eta_min=0, verbose=0): + super(CosineAnnealingScheduler, self).__init__() + self.T_max = T_max + self.eta_max = eta_max + self.eta_min = eta_min + self.verbose = verbose + + def on_epoch_begin(self, epoch, logs=None): + if not hasattr(self.model.optimizer, 'lr'): + raise ValueError('Optimizer must have a "lr" attribute.') + lr = self.eta_min + (self.eta_max - self.eta_min) * (1 + math.cos(math.pi * epoch / self.T_max)) / 2 + K.set_value(self.model.optimizer.lr, lr) + if self.verbose > 0: + print('\nEpoch %05d: CosineAnnealingScheduler setting learning ' + 'rate to %s.' % (epoch + 1, lr)) + + def on_epoch_end(self, epoch, logs=None): + logs = logs or {} + logs['lr'] = K.get_value(self.model.optimizer.lr) diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py new file mode 100644 index 000000000..de44921ce --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py @@ -0,0 +1,128 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py
new file mode 100644
index 000000000..de44921ce
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/dataset.py
@@ -0,0 +1,128 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import random
+from keras.preprocessing.image import ImageDataGenerator
+
+from auto_augment import cutout, apply_policy
+from utils import *
+
+
+class Cifar10ImageDataGenerator:
+    def __init__(self, args):
+        self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, fill_mode='constant', cval=0, horizontal_flip=True)
+
+        self.means = np.array([0.4914009, 0.48215896, 0.4465308])
+        self.stds = np.array([0.24703279, 0.24348423, 0.26158753])
+
+        self.args = args
+        if args.auto_augment:
+            # The 25 CIFAR-10 sub-policies from the AutoAugment paper:
+            # [op1, probability1, magnitude1, op2, probability2, magnitude2]
+            self.policies = [
+                ['Invert', 0.1, 7, 'Contrast', 0.2, 6],
+                ['Rotate', 0.7, 2, 'TranslateX', 0.3, 9],
+                ['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3],
+                ['ShearY', 0.5, 8, 'TranslateY', 0.7, 9],
+                ['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2],
+                ['ShearY', 0.2, 7, 'Posterize', 0.3, 7],
+                ['Color', 0.4, 3, 'Brightness', 0.6, 7],
+                ['Sharpness', 0.3, 9, 'Brightness', 0.7, 9],
+                ['Equalize', 0.6, 5, 'Equalize', 0.5, 1],
+                ['Contrast', 0.6, 7, 'Sharpness', 0.6, 5],
+                ['Color', 0.7, 7, 'TranslateX', 0.5, 8],
+                ['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8],
+                ['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6],
+                ['Brightness', 0.9, 6, 'Color', 0.2, 8],
+                ['Solarize', 0.5, 2, 'Invert', 0.0, 3],
+                ['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0],
+                ['Equalize', 0.2, 8, 'Equalize', 0.6, 4],
+                ['Color', 0.9, 9, 'Equalize', 0.6, 6],
+                ['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8],
+                ['Brightness', 0.1, 3, 'Color', 0.7, 0],
+                ['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3],
+                ['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9],
+                ['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3],
+                ['Equalize', 0.8, 8, 'Invert', 0.1, 3],
+                ['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1],
+            ]
+
+    def standardize(self, x):
+        x = x.astype('float32') / 255
+
+        means = self.means.reshape(1, 1, 1, 3)
+        stds = self.stds.reshape(1, 1, 1, 3)
+
+        x -= means
+        x /= (stds + 1e-6)
+
+        return x
+
+    def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None,
+             seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
+        batches = self.datagen.flow(x, y, batch_size, shuffle, sample_weight,
+                                    seed, save_to_dir, save_prefix, save_format, subset)
+
+        while True:
+            x_batch, y_batch = next(batches)
+
+            if self.args.cutout:
+                for i in range(x_batch.shape[0]):
+                    x_batch[i] = cutout(x_batch[i])
+
+            if self.args.auto_augment:
+                x_batch = x_batch.astype('uint8')
+                for i in range(x_batch.shape[0]):
+                    x_batch[i] = apply_policy(x_batch[i],
+                                              self.policies[random.randrange(len(self.policies))])
+
+            x_batch = self.standardize(x_batch)
+
+            yield x_batch, y_batch
+
+
+def main():
+    import argparse
+    import matplotlib.pyplot as plt
+    from keras.datasets import cifar10
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cutout', default=True, type=str2bool)
+    parser.add_argument('--auto-augment', default=True, type=str2bool)
+    args = parser.parse_args()
+
+    datagen = Cifar10ImageDataGenerator(args)
+
+    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+
+    for imgs, _ in datagen.flow(x_train, y_train):
+        plt.imshow(imgs[0].astype('uint8'))
+        plt.axis('off')
+        plt.show()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png
new file mode 100644
index 0000000000000000000000000000000000000000..cac1a25100ae5b9a1864ff67b2515eb025ac2bdf
Binary files /dev/null and b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/example.png differ
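For completeness, here is a minimal sketch of driving Cifar10ImageDataGenerator from code rather than through dataset.py's own main(); SimpleNamespace stands in for the parsed argparse flags, and the random arrays stand in for cifar10.load_data():

from types import SimpleNamespace

import numpy as np

from dataset import Cifar10ImageDataGenerator

# Flags mirroring dataset.py's --cutout / --auto-augment options.
args = SimpleNamespace(cutout=True, auto_augment=True)
datagen = Cifar10ImageDataGenerator(args)

# A fake CIFAR-shaped dataset: 128 images of 32x32x3 uint8, labels 0-9.
x = np.random.randint(0, 256, (128, 32, 32, 3), dtype=np.uint8)
y = np.random.randint(0, 10, (128, 1))

gen = datagen.flow(x, y, batch_size=32)
x_batch, y_batch = next(gen)
# After standardize(): float32 batches, roughly zero-mean per channel.
print(x_batch.shape, x_batch.dtype)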
z-Ux+mO>hDTTn^g@m^CiMOpDAkBrT$!wzre`L^#x4^3icTe=h2$!!Ow9vd>>ZK>`Ds&esNzdFAr;ICQdw56r|yp zx720hbt6ckj&*o_?#M0V+Co5ntj#HT+$9#SAqm!f3+5Z{=WGZ(jY};c(r;-?x68$B zy=xlo4*yY0nb}6zi-g9oIn7Ze9Gf#QTJ1TWQgFd|?&8&I{`9oje8V#jV^9}jU#$MCCNi=6|kKQkGy5kh~S5wd~`0Qo` zPuX_&;@xr4g?*Xzg^;G%pNM{8Oo3=Dxv}S5?mW*5hL9K_OvI`o2R4%{x;eW)$qBeO+N$$9F(5{(jUDwTWOOfR6jg5anz+DoUL7f#$+d8SS>eX$BE43EEsr2ty?-YI;07CSnnHES4(Z@UDP~@IJ>e~ zF#%6($mUs#A&4918l@L4{HDOxoiykOe1D4Zor)Ssq?bG!Yh%M+P8uLs+F3|x+g4Dr z{y|gF(Rj0*7h}9jm}6zx$2zkb2;O<_ZHyI`E_ruuxJNhbad8G^WbW%K>JSf(4N)8 zfq`(S;FXJMw7_@zfO<9mw8fM0QOZKQ`Lg2Zr*)W3ez@1O;vGEG*A$M5qLCb0=YmZd zMa{_c87Xv1Tu>u0x7+7BrXzc z3Q;2fi1Y5^o=V=qtPAFEQ#mKQw-irL5{p7HJ52_snqW zzAX;y_}$gu)B|<>`sjHje`1u}@YY0;tG{QSsEW`q)uy~v#2?H?7B zQpMe^*2n}fZd3x&bLjDDCs0fJ>K1x`5|}|y9AnsergwYJxBfAqY49BG7RmwC77I*P zwF>@9Et6>zmd1{ONWdK))=)mKC4~!VOeZ$$C2!_|ykB#?PSLNATA85PZp@4RBvN*# zg8ZQ|7kx8fe#Sb=r3)d+tmab73gBP0Ib`o}9Inn(>qGEW6ZLaF zUnQg6Chm{D`6|%(U6^3Iua=YqXg7oaa_zqB<+KMfy67UA+Y?50O|UO`-wQ3l%WJ;U z<`9jhQkhVHfW74*21Ab|o-;5*6w!jv>_i2PqZhc8Jto9V6LUi<{zFxS|K4Qpz$zb*N4b4p zEccRT#{Tk6Q2eM^(xb5B!VuC;#!{+Rqp_!zgkAc4o$p^)t=3r0Jr8qH< zaz+m>TU%+6-*x##3{QwbS%KQ6%GK3Z(?zkLIRu#SM1|_MpWGhnb7Plq1L16~V*XzE^w$sg z#y@_2K?4O?TD3fxN&A-++K#nGjPYs$*%u=Noc|b=FQiPaO4~>8@0p@H2;0*>Ofk$0 zlb*UjAu0G$;Fp3mt-9cEs3<_sbYpkvb(wHD_!uKQ5}H{njcmZHE+Ay^Oh8!M!ZuhI}3=F)^7{0JLx#`(5K2xkktgtG}Jpp zC(@C&l8#l0Hewd7XHtPAl4-btW^>ta#G?<)Qsg#VUAkL5a`bSgpSSQb+VHJurmbsp zcNOJs=2wkNe%#L^#s!?3TM!|a7xgS@nIvI4$Cglz-!Z#?U#el7OLFs z%}6pg;VujRQ*cDY?UoTWOOsOm2ES0HsO z^d)Dh+l@GgvFhO(_RI56S-;dW_=b4{iz_5YHyv=F=V2P3R2FbPS_9;?VFSgilnm61 zKfX9E3GHx$w>{Gs!t|m+tSZ{Gn^W21dn}cW#i(oA^oi{}2cL(`GZ&m1k+=U(_obZLMg8Mc0XNE8gCET1s+wZAH|*^4zs1 zO2f|Gmhjq-?g*g@nSW1a-Dr&~;+Kj>N8Cmoh5@XnODZ(>>W!I>a>*m8+u;LxJu`gg zKL3ja$R@FED-}-VHfv2V@N+v$22^@ zQI1J>uwNtS7RzM9q@>f5468uP2t=m$-a}U(=G>`W;mr*Vvht4WG3|wQZMQ=LII18G zhA@rG?z<{b>NZDpfB4C-Mgu?1cT0c~%jlMbId+H}pXZZM^U~AlJE0keTJhf2{7b$$ zkV4fnvt8o(ao_FS(>UG|$4DWOUUJOFB5>FUl77EKR!wkO9 zyV4zfQ0rPHiRb>}Nk}Ll>T+SU3Zh-c?LY5n8foY&`qIv%>$0G$?yG#+Y;Hr2wFn4I z%und#bvfb5!yiYINqViaGBIIJlNmvKIQ$anE6qB&} zd#PloC%EFp9lVFBHAit%g{ZakkH-+~t03NOp!079x5NNndrIk0z(1! 
ze{f(aaS2k9oed~0R=KD3m4TDae>1_u=BOrqDbTJes9NyG_f4fa!wBAzZw*>%zKfWf z)$vIqT^Axv%_iWOokqk3^u^+GCFFp)cxG{&e%E$GZrt&Q5ue2q3khY%;vIWXT%XYR zg$K=lb&8s$pv@?$z8fLPYbD+M;tR6XNp_E~&M|qq|5EI@T9TTp96-0IlmR%K641Qj zo)pei(nPN@=h&C>HgW8)+U&g-J&AX}imh4ead9x{E?!?q!$jW#(s>$G-UYS1sf2!{ z+KJC~`D^{M#PB%0$0&4k>KxmzIOufs;d>vNr;ZrD0+9%GQWaCW2t!)Ggzj!PX9FsL z&~(|0-2g7Ut6C1oY0GpHp!{WW&-DqJ6oL8XR|dNVaW5{pSUc>^EJl6~%4k{fRgX0t zn-ANo8VcGCj1U-pBVz)Hw+N_=y17z$(E7MdD~lEgW84*8A`nxzn@(GdW5ju?y!}Lw zL^&SIV(2YR8diIAHp>bR4r)2dY4$e|$_R!jx6^W>A$i@M7Ed|G;^$i93o7ZorUvN7 z#Yc<*b6l0RQ)d!2H?{kMKh-Ebu!zXf7V|z@7#V+{#TA)aE<6}v#k|twG370Qa+sp0 zbx;M~Q8d>39dQLNdg2$zx7W}@-BfRabJ>WPV7s~r5z8I#{moX@$4^Pq&T7>aYkqJy zy_PVq`50Xv=g|N$8vwfBoTA~f!D?l4`jUk@V74F{9g^5=4iW?G4_b&Wg}j{wX=ZVv z!VLM;d}p-1lL7AKEIB{Jkt5Ia650>Hds^#VvsHpICxj%%1-6F2BZaaBSou*t)T~X` z&C%mRW@eXJ&h`^nJGKIcHL<*E*JyUex*n_N(~(Zo;cyPQwzGs)gl1OtEB!!@P-=|{ zH}*wEuv-O92vyx&;g0GJPmCMA|I>WeIs67n+2^fi-Rx#TTiZ7w^6VAdl85!2v7+MN zd>g_YaD`vRST*0yn34~;R48>^(qQ6V|MvU;9h|jRbkSCFNHE5uuahiOlwpq9iH0T_ z*4xC?-TsNwl^F0x#HzGQ>NTyFW{R`_W4Hz^#bJy-dR%aXnzp|DtCpEG-S*?LWSOsI z@E37pz~&OARg1oUtBlq`MWBt&704S8brPyNybt5;MU#Gga~MX94#66OpSpQpK1uT5 zX!%(Kx^k#m9ErbIi+%!KYNrt!jYAn=0hO3RzFbp;!_PVDavQb&Z>D~r@%&MVz)|hc z9$QjwOr4ecraFB@c2WN0AUnIN2V*Zdn};WDZPsYEXh8u*1_73Y>@7l#fwv zPUU;ENmGg}z($cw>6y;Gp$LI8?e3$GH8=IH8kqF^{C&op*p zRWlZl(I!Ek=vM*GdWx?^+{?2x~b}~qt7+9!YVfaTr8ciPnw%;NV z(knh&7=w5Bjl(AV(f)Jcrir=1$=kV4;f)1(YDb=0JFwmLA@V+E7wr8MJ+ONZM2sTjG|ViTj7cx4Mix z-d44La`SZDH!Prt+Lk&w2%+U3O`$lk z!=gunb6{z;2TAScFcta8ucF^9qYLU5jia!Nn14#jz;SiHW*$9TP+ZS;Q+rS?tHx%VMxCCwh(Ri6tIVsgS)8E`a z(5^oKH3`$et+EM9vsXE4W2->+(=$)bgKzViTQcP7&bvLc2qL}p$06H;G)^PV=3XDc zLWd$>@9pKN$JyvD&Q`aDu?+{|MoMhDSLc1=wL4$&9c?}AR0y4XN~>hcUmttk+T`&s zxTYZSVs_UgmPaqqd+#{AwoVWj`i645a5ngGlrabfk2a-mRPy%5B;Yl3Uo83va3HW_ zWazzKkJPuyePev8dsZ)!Hr_53sXZWSZIDNxLDCJqm&;UMlpo*s$Fsf8`NdU=K>v+w zB8_VHZrrz8+Oa#Jl***4S7Kt;QkUDbky79*Ct6++M%pZ};7l!g5ka3|{7L=@>d0uX ztk^-@mm8N`IMw41EtMtUQ?^7|L{9i!Lshu}`pA0+CB#Y<8q}U7f*dqL#wep5U7YO> z&RfHtmQiX6fn!$YQ%X|~Peusp0pMNs?x3+>vJ%H9%&-!TZqJN zp^hAL*trE(`FHIUk2r~^!Gs0*AluSy9?%_|z)``_?(CSxp%m# zZv}hU%egfETkD5wz{~TC>ss2+aKQ5@dcOiDKXzx=Xg0P2+K{W$L{&R7qhVDT=GdtB zW=jWwsoG%Bj7MGJ-rU13&Ile+fi#oH{9id1<@NcLcI+~cl97^gk3&_wj#VS)A7P98 zDGk4#sLz>yRPHD~Hnn?z>vTNjJXNS2(JbZR!;fqO&==Y07zmAsmg4vL^ZBSNpyRyb z_xch`XJISc#YJaEC+uKEzpCWCmb~3Vf!@8o0|U>^6T`ns!2EvgI?EJ1$MTMtt2FRH z{C8Ky6qF2XjU>I8kaGB-WH^*vH0F`mK#y*yv&(un8Q`)xZArTHp4%}d50r#x*ll_M zA!pfFXjWJvV3+t0oBp{9RYfJQ(oXaTOz9%8^TSmwQuiiImo^&G4SPA|K2`RcDh3@4Y1NnmKB{+tjdA^7!%GkiEb>F-8WGdwl0m!u(zlRsxa}T{p$hz+>77u!Ca| zFZdp>c9|W-d;3&=YM{RgWt=6fqB{tG*9!`WsljR;!Q%hP->fOwe(UMsp(rtbtZ29J zyS}(z#ADF4CUOU}`vzpz*(n+WvMBR4=wM-N@sleb5tEsi$z@0eE>GuU|fQO|$Er~QrI4-F2`I?DMu z+BhGbSRusX7lrMtExx|t(nqsue`mqUGgK`r(tMMHg~tOgwWlO67#ct^CwPKdrp235 z(c~`v`1Mql<>jsiR5aIueE{on44$jWWQ4@$KhnjB+;oDde1_)T5@-))*McUTd{e* zTS8FLpHJy4li@UJI?w$+{uGNe7(R=tKf=pr$MsW?EYsDgsdkpSW;lZ)7`V|i##hj{p39)XVixt zp)TvuOp?TOg!=OxP{qr>^I!THvf1WVRDp;GHnwy)HVECmTxb1&fcyzm)cxG=i$-ko z?+LUHEF46#CXE#hSR2($S#}&Uu}VwWM=3d`O_GhD;d~cbpS`kv(wz)7e`$U7EzWKI zw3atlt2qkjtJmXt&u@K4XV;MkOF2FC=L`Rql;BeIZI0}^BIKBzy+0U$J&}$HVZ{Jd z!EMD)uFr#g?H!{gFF_wZ7`YoiyEHw`6gz9QY-IewC8VH8c*}|e9bdK&@Lf?vY zKkRPhzS`Ho5u9c=M3LFzyVVvZ!_}Nehi~ssOp=!TCR8V-_*H_WyS05o$6mMEX+}>N zeTjNwj%0x=zUn)C!GhVVPRQ!RFy0}FO&H&6s0n9uR_viDYe-FuZ7rU7)6f6-?@d?# zX%C1^-EeWd`mf&1C%@)FN+bWBTl;s#_&;`Q{}1WwKi&0z^nCs&{NVq`w~Xz`5DCT5 zkle&>J=O^ild2Er;GM6y zqT=EUbRg=ydxDY%Cb6}gfTKyjsQ>fbB5KU`ByUzr9TK=M2^Mpd`$qtFpX-FqdY(;H zrSs4_(Fz=ajA=Qik6@;)NamyQUN=FQhCfGd?eM)@BFvAoHpU(bcMdLJBWDic{{;C(?@H@ 
zF4^-%Oh_W(VX+aH>1uKtTCVN1K(5DLT$MVg-(^q)iRu#i2u3{PUSfX$@9izh3A8`f z#Jq_m>d;C@9p0B&@Nio*@7eCCuO$|!)v5JT4L|$CFCO&_?({7DQC1~zxQ+l9wOZ|- znB4J&hQJsIFON4BG}$X**GlK-nqD!@2~8+(>3EMumE;0l>t+$t&VWzK5ew+52IaFY z%@fu8s*&wqBRp2lZMKFG?{q(vA1s4Bk3fbj-6)jD?dvDOngZ%_dcO$d^cuvvu{a&M ze7YtJ;+d49tCeH5tZa}X2HpPdSsiXT_|n>@HQc4nzG7j&KvtfK+&e893&vKRO|F>z zKb*Z)R9k(wJ^Zw^P>L0IsNwEzr7gvZyA^^%km6oSf#NMraCcHHIJ8)bI{|_eC; z5G?S|dvVTpWQ_NW@r|+Xva>Iey?$%0x#pU4&sZM+JFaN5rWa))t@t=sZrOHo<`@)u z`=|ulONO1TOUn(rR_XcXo>2zJnPJtH&*TCzXnlo0m%`(x_kGO65Jq2kzXylbg(Mh= z=eZb7=*g5I;p_k8XB=<7IMrWvQMdT5kC#?PhiH@iEV?K^mRY;SK2mc=jKN;E++|w; zy>4Mv!3sH(EyK!1gW+vgRz2f**}QNmGT8~ZuN5==+IgA^Jmr}KoI#5@p1B_iHNT&M zwNmEA!H%Wj3e$XOpXiVTp}`jJFXOnM@tEw;esNJs#P83dI^vmp(B3)t(&iR|{5#&sTw z;{#@ld$|TjB-|Ab&X4GU8SL7^y1ajy)(KsF+cRpEOeqSf)>p5!8+`u!9eE4SY9E}( ze-EsqdgDx9RrzV^pK`M#gZ(F3?}yL+UR%XBBm;08rx#o5v`@P)4$xEQS&e>*riMRJ zERuWy)EXP+^+?Ygn=HfAmUB_G*U;#w1zH>t`#6gif8R3~{h>L7Bi zMFXzBN_2pu+yv7Eh znD;qj3@bmw)=elB*7_QBYd-`CjK)&oh^VSssS;0Fn(9M*mA~7k5^=5zmO1(onX#O% zeGi(Y(6CCoTki7?a`r1)O30!Qwnw2*#fP{x2nHy*nva7+{hEx8YF^wP$S_r7a_n!` z?BI`yYzAnz>WCPR3SW#NUn{sy5Q24BlM3|YBk;4xGGjveMVqLqduCli=U;XIR@C* zf3}W`1{wc^G@*vWiwSUAe!&zP|PME1S zea^kRi{Io)6*}uMcH0|{vab0s#L;eCR#ccQZvrUc{e`TAzB7BNny$eOR}yY!yilXC zRsX`IGSt?P3k~$`niXbC$h|dhs79!@Oa-_=C)gO_s;1~>^J2IC_+g{#mMaFd*sco% zv!;rVu_-d=Q^33zu*vH>=k0e$%3bkY>z+1fPBjm;dkC#96l2fJ>GuSsUoAFZbqO)K zIR=1b<^y@;DFWj*@b3VWgUZg432ls!b#3dDzmkUyWrj^s-UM~qVs@Ixhv%NsIUsAd zGSVUa6Fe19=@6!M(b|$|y-`-qA+adZ_h=H5_wFOSCWC|jfWx{X^}uy}e}<_BL8OpH z%kUE4&od@uE8)V??!k(GKa{)m8T6U6mY1dtZZQxzn<9EC-s|vi7|WKi+dd8{sdJJb zj0cfAgjMU$I5s{4OOeewbIzZ&ovJo-tNJ`nq8^chh)=$ei`6MhW|RY34cjW!Zc~17 ze(du>lfy`Ff-)q`H2DEk$s{JP3gqS)ngIZ2wO9T2Y9sAp_>xCa$Gpu9%j55aBI_RQ zdpU~*D9>p>z*}yC^*XHbsV| zKs-ci)JCpy_OvB0x&fB_{k+ET75#EEV{5XmLSN^}^Y72v!t)a2KgN8XdmmNUzDPAP zu2?Ir4@t*xr_H(heb=Yn7_Cx&!)8S^%WayK_ugGo6QxP;uhN<3|jES>UA2EYyTUuT)H zsJ}7pfc)c>OLMO9u({oQAbE#apY;bkb@)O9XwsBEJO@3YUo(c@@RF6B(vj+{mY05x z2v6hScIPpDFQ4$SHBanl*BkVW#_-2#sX?247cyx-6g~Y#e?_aUXlX%l?`_?Y*2wn3 znW#>(BOBRtLzS*-o>lsBt_D=#gFL(%PpWF=(W_2-6;-vqqArFf5S8v^MuNygE4*Mn z5*}{)?{nw0eTXWDa}nKYNt!t^;#ASm)kF`bRZ|`2n%?wxSNsr4&u3E|BLTWK?tOKp zmpr=j_YCQ0ktJRBAlMA zc2wDLK`@B?o-WxrCT{Rvx-}+~eBGJixrPuGIRLo9H z@EhQ=D(SaDkDr&$R%G)=X=q+vgxw30l6a}a28{L`--u7gbI)Xjo{xS1E`I*EWRP1kD10-ZJEwlq;VD+@(M~ zxGf>{VVe(GE?slq08+F+jlmgXXk?@UZ-kO5n`-W!7S~PpTonm)BL8Z>+XojLK?gzR zs4~;C04HYjx8s{BKSf#l0Hd1b$vX!L)mda-9XT+p^h@5OXtK>@G&C{ylYijbU=wbG zv$_+DQtCCTut-KVo1DMHh~78u$^A(3cY4bbnrVYGmZ1hNl4)1ON?X(sko+Uq!87f}ZUzdwD_ zbw4WIVNjdT04Q8KfVN#TzodOItoN+u1NPjUtN4A5_obMqdD}C7&yxR zRr3oE61Fov`uv}WCdlV2#Ca62pNYY$F-nBs6vP)6<%owJj&FznF+t^vgxPPUZdq>Q zp6mL|whZ@_2gfjNWfg*WPW(7CM&M=i0F4%S^K`0IE`>kX?E|X`C>6P1B@CJGbMfZO zFRk0tV&AT}s4*18W_cmr!jt3zSSA|7+b@;}61%k1yW49z$A%5gdb4m{uvJgwR#|gQ zw+KSOGRogqV=v%GpYB3OFKPAteY@YymglIL;dafeO|w@GG5FP)OZ1H{O0-zy7t^yAo?iHW7IZipDrR#er%At>XS4Mz0%mN zx0Gg^Pba2;@yNS+NV7pH(jj;_SVWbR4NKl24!b zIkSn!YozSb`J{UDNL(gh!}WA}s0EWU)iZai5D5Adav(aT4mu7_@XcD;m@3UEd*1+@ zfm{$DdaUxvE63ob4!pIb(7Dxj^XKnJ2KQZr?T?`Y0uN?pW+u93SiE+J*}j4;aSM%> zmT!A-j?#<~N>LNP&o{E@`c6-0nGzFPQy={Y7a)LnZWoRKfqMP)RRVC*8zG}}L3Vpw zVcduC6q7@nlbb=Qe@7+!AN}aL{y|=81z%#q4y-Bc*NyDnL$GYny^WVNnkaMT<`#C9 zwFd&^;U|NS1E;)$DoTl!XVoymQx)#slYI`S+xY#Fs#9StFV035TRk6ctUoz!Y34G; zyp6Ra8B;km&O$!o%6Nn6Y4sW$CF0GWGMgah9!P=TxZP4<*(!r%+mQii~yg^X-^x5@vV(W1AOM^T>-WFkseWt=g zbgi7h(#Bi@$6NTwit@L4#}C)&fpVI+aMnw(Ra8?x)@io52Px+Mr4^pK#x3(=X3tux zBLELB4Sss*Z^LxV;eFD%(tU9+pz17jZ(*Dk@22)xO@4b=efXegEvkCh`m2h!L~AQ^VOPi~9ykgy%~Lht*Xusy2uY&83oiB!6H zs58ZNeOHA#CnPK6X 
z7>=I)C=H>fGn|}llifD`MyuTR)8X+??wJ%lWAP@aC6jf>!v1jUyX%vMRTUhlY>XcE ztp9wT<_>84>{IM?d5`3=@ms%?A73zkO}hglKc9clWsx6d9ePie7FKc$kTK;Q>CUBr zTqlJc(0|oh2@icokYr5^E0H#a3d^8cIs-Y$sM2Ed6pXjk-eyr%vX;+lp{f{OR02v< z((iJV+zY}P6wvea7Ou4;B6~avKUdg|(1I0e7n@ z3}{{*9^1+t_Z#@k85~!zq$l(TBD{p>D|jPDbD4UM%4c9*R4+T3xMx8GwFxvnQnRVWMZ` zwRtjeS7*&QlzZ9yRRXra!AY9-^eTm}i?oVu){#~I>C#10G^ocqieYZ!izAB_b~{69 z35Y2e46Th@CC$GjIz7ESI}W1}Kyr-MoszBMRC^{yW8IU$ojw}z3^=R03Et-5VDqhn zznoTF0;$5gJm5(4Ll=Bww!B;642w zbo8aEUh(zpc{6ia|naRx=s`Ru-c z4>PjsO?*(#)=&QU_af-Jr}pKb&>L5a>xJ2S@2WY;s*9ti`l{zL&QuJi`Z#tsFZ!DI z)RL{~H9iIpo<0G4ma<_r-q^vowj4aJPdAGXtv{nNDORt+3KM+@VR81Y!jc=>SeA6Q zL+uZKJc&s_$5d;$phTE|Qx_x_z*oGtub9Ti8MOoj&~gEtCSJNnJV6bT2_EaC#Uqms zdYUf)8N`Td^00#vwpzrr)EQ5htTR!ya~T^n{e z%+Q;}_cgRoOBE;q^IfDGX$pMzR9gIVom?#!>I$Yjj49HLZOZuef~|K5$IC1Z7VwjV zoM#Oi$kWj7{Cr>VPux6{S#dyEM6Ns^j~RfAw7~2fc7%*-b3G*vbE$>BhjBh zD;qDib~2(5*SyXewLVtM`8Ky?Aro{F=x5kVw86hiqW|LK7kLxf1fi$LG;>JXI>Tq) zrTkNFv|>))Hv6MI1HYeZfN`E8Co39C|5|bmeQ_%DG!K-_#x5dYVv3o4`AEzeblODd(67M~n7;-0bI!{y0nZS|UX%xE>UhU)n10PA6D4 z3AE_c`5?cXQq^1+*_lRNLl7f2&{Ox)lo2mDRJA0Xy5%qR=@S9xH+%!j&c#h7h8Z8l zq+;wwjT#~fIA_4>Z)&U<%*O4A-#g4Ab640o*PGVE*@n($bs>4+jM0H@p_4ZZbTqTN zCuWIkdKt>ivmgDw_F#KA5l=&}=-URb?X@Yg&wj+S$UQwAmJ{fqon|q5JlYfZz>YK$ z`bJGMgaTe3<~t#wBwM=wM|-%?!9xn;%Q}+9swVeXpQW7eh>=8yb<-HBBj8?VI2NqL zrH}I>ADEusX9F~w3lRSZ{X0*!Y)HFR783^yNL5=jcKm~cIkUI#mM<9U+aG=3&zB{1 z3Y2bG%edYZI@yUrL}EHp@* z8jqPQl=_N*mN1n%udNIkP4w8L&6uk)?L8c2?MXtej#o9icA3&ZjDNOIJE_X?1{q7) zl%^q-=c3Fuqwey%Fr>-69pf^`Ub`YZJR==P!iVKSfxPyDq(4W0l0OwIAQXQ6(5Q$r zsv;qcYM-wDZ!Dww$Bd2Guf{^JzcY4FWtdLgd%FJa!#_{(KHQvX1g*_&bq%TIUn1ME zfEUQ+W6^e@CuOm=hnHBi+gi;i=12~+oV|#-=4Va8nxwYnNCww&8@2hxZ2K_oGnxD_phD|ya|(UddP$uAurfhEw*{+P%` zcXlJ^3lS4m?C+%R$+m4>LH_1#D`#Rpg43QZ)Q&$hzvi3!k!`ZGmQ&k9ff?(_r(L7@ zg0@JCY}I!gAf`nd-4Ooc$LsIB&gC`QnS|*}S4!Htb~Zsm*|x&ghd~-t8Lj@Pgk-re|m1C=9g*NAKDjfOxKbN14RcN3ZXCzdyOnD0pwb6z`qsB<2N!cGq& z1(>g@6`jNdd3J}2@hkT@Mbzr);?yp_jI>34u{rt$eBbBnPNJ9MZMu(3vWS4!{leStEEH7m$&x=>GCsMnXW@$Gy+11QDarQw?Bh%ae3U+sSUwe037jk10X2-cTJ}8&p(KnSdcZib z`?5LB>1ahd`ug0Ma4*$Ad4O=D4JSnaySre_b(t7w4LD*ArW=+Muhl&MZ9#MXYtG1{ zlT|XX+biVY>A1u)&vs+jS=KnZH{nf1_2CDmQv}c@R8Zj>a~D)WM7>H0y^+6lm#MtG zC|MNR@ax@2Pb|V;v>tEa_3gK=#rJipYK}3!wDAEZU2;e*dG3t5H;mo9#$)wb`>g~b zs&YMS=#F2txBrgp85?x@otT5*@QgOSVH{N%OTU(YjNd%2aJ$ghXLWZW5SpMhJTl)pR_VylEGz7EMgfyx562x#sMasJE7)1U z$tpJF=TyCAXC;Sn}riF}t3ML&B}E3l6Ga&J%93zUZ)+T2-B8Qzq$GJT0f zUA;LgbaF3zFY6)#ewi)W20Y4bjC-KDok8i0`E`?cLxXz#0rdB2$;awnH?LbKA)3d{ z%?S*f)u^3X5xFGvzS)Srpf*9s$_6^X@W8Up5vc{sPV@^MxSH@JTG5Z}VszsgQG}=W0$%o2z z$ZFyphzZs`-zF%5@$(mj{c^aSkthrfp^vRH$tvG}x&1lf1wmoXq-@Cj?HHZvLY3j2 z@0OhTbY;s9uSfkJyQ_{Z%ad*0%eM49!15vrm?MPou6iD3AV_FrxM+$8*Cy}OMH%Rb z=}Rv#!CmLTyEN(7i)<2WLq{=UqM7s{o)uHr(%|nFjTa zo>`F$til?QBaaw-Sh0UNC^?G-fc7)tQCj8F8rGs7yEf^v6Zd`Uy$RG>Fu}1xIL2Ktsk4N}$RW=C)k<+> zbOBRk(bYt0CHImT#>)odJ>l02+xA=B}%92GPj8-nBriVJ{?T=cBpNIV8kl_d}nlQWVT*jpJOrQiOzJR4a(H>LE zz*DnFZ!mO8erI6u+xscdp^gd0<9RH?T^UUHU*hu7u@JI2>Axh)UYyQ-GYw+=&wTY~ zNUmS7Yv$bg`3hbNr;Gn-oPbZAnd=()p^%9xeoGO_Ot6clY6nhLoBJrEc-o z6a{~<-=?8vS_bd@DAtT-_r$9%Jq*RoicT(UY4dxwbne70mhZ3D)F>~0fJ>oYu;clL zHo(2FX_e8lUaE189bwH0LMbaSc7ELxh}UuUo~i zgM3SF7Io{`Ae_+xLr`}OyA+e0$D=(Oghav30ZPlKt|e=>)=)*20Pb07!xgJKvjBeq z&vMo`p|8DzX**NZZ#uhx%=NfRr=6CTnD8%_O2U)#6=kMxrtc4j)VXb}%~Puh+^@|~ zWPx_l;#WM}-({z-fem67PlODSG-@h)EGtHny>u219co5K9%gsx>Gmx0i4%E^CrO*C zwUi5nNDb1vd21+*$`WKz@FFx3eDmvTBVbsM%X#EBbaW-S!>u?5*w&@I2?!wqI0q3y z-^(iH1I)}>!p&Cke3{y(KW4|`yfif!XePdWd6Ol%W=qC25MUm5GoY`U)HvR%)FgV3Q36%!WnPwNwR^E`+!t^YsljqA zviQm~7YWP18=x(4Lx#p{wJ2?__-d8xlpYWTJ zEkyW~rm8u3vYcefeFm$7UF^(*r0}`hx)kVT4Qndzh}_ztReqj 
z9jx@^{Mr8sZTEpA^4N42gK4cgaB<78fySLIVX1J2pLP7m&mMC6-MOI`4^maGRb*hb z49>^HY-hjU4P>@x-+doP*OJVRGIJJ_dOd%t8Pa)8H9N3Zp+h1Uys9k&5`!r`@M#8~ zy;3u&h~{^K!a{Mrsy{Ia3ql~LF{?;skru;OwSIn|Kvx>!qTfz=o%j4wOsU~@rMn!A zqsQ;+Uld*nLHQ0VfHOZKm9wVWp=gRr)-Jhd%%3xxoCB)*RoIPWteq@{I8sOAfLJ%b z|8lCIue1Cu<;VbVukn+d%k!8`zM)|Pj^EYR+vmKYq$sk0b(PT1kMETw6eQ%ES*s9r z;G<*ep-Xln{dBNHb^oB#&GI%Q9W6!fTbf3E4_T&^Y&PVbR2WNA)bheFdDryQcPpcG zTRFvm%n^2u_nQPR^73Fq?X{a<4vC5Zt`D#yZneoR`psWgjjgG%`E^!`Jqh-SgYWmF z1@jnzu0(z8z^1Xp4KZWZ2S8WGkQ-BRg;#!<-&;8Qbm6~@f7%zs&EPvTo7Xr+c(C4$ zJ8NC>h}rGVdKD!hazGT+Q1zYrtlk+V2T z=IE0cEZ9Jkh;{a47}g=lclePZ8>&k_62Wt^v}p|jq*&OGsHQH!*jwDXqw;PUF?kh2{A(c9)xHu=p$scEU=3Y$?OekMQ zUWYLa&$0e_gb7+-n?Xef5ga1hp?~kfw8(!QhCa{ZA&F;Mxj$xWDN3N=MK>IBlU^LLRFe&g>NGN)a-}5aF!{bN_JU4t=bhi1{N|*Fn|T7IrzJ?GRt6 zpnaNhLA+uYJMzF?C7t$otJ)jdj$wf|c#+lN0Kl0{M%cU~-xB~d-z#jYefxOJI8zU# zc|+O)X}G`DdM?L4792Ql=*b7nY#KB+8O+{NrN4v9yqZ8gp1$lLxA?EQx)&4qKa1x774kuQyCRd>kssMrZ`s~{VpvFcGwUZ& zRRP^U<^Wz85{h-m$?e2_^iIAp+Ag1Bb?BrG;cso3gjwD=BbG+}HW8gClBu+6CDvW- zUjwhd21vbxR~Y#{Z%5dFk`54w4*WIebf)v}rE0dL_j+T7;i*^jM^Lv|bl`8@hd$+p z6wXYv;yYcje8Jr-C6EZj%uwynG(E7r9u_W}-{&(Cn^)3LVGWL!W0qEW$^ zaU`%w> zDhF^OUX{k9Elwq;>@KifzGCn3545E^;P>l$+X<2wd5u?tqLs#%{1@S8_`Jmn7HbrtDIb?-cToT{QS8U2pA++3)4o|U_Q zSuhCidcnuCv;}9ZS;5G}5b@E>1;LIQ{Ftd?@OdmGIB|uihDN?8x8Pcs+Jy(=EnzIx zsj^F{aH_0*ixU{1*dSzbu)-#V5pI1X@46M1iiLUMis}78p+R0CF-Bu!!M(oO*oP4I z&R1dJD#2{0&&2=Ihf~4p4xs(s8Q5p0QyDbIH5QZ}aM-mbacg0Pwj>u+eid>Xdz3kn z>Ha9+sHtJjb~SXO=ib)B@3rzCGk_PdSsWw&ct{I)1_S`!gYGygY**tR;qW8wQtna` zs$Z8IiQI;=?3u6WUUVnP-#kd2Lz`)zHOo@eqpfXMSFE-yZ{_dSh>ljbgX`d$+>c>p zQfX}?JjXA${+Q1dUmh4PfGTwRBpWn91iW|%?AH*7>h$`spdeE3ItcO&TZ zh<$yLpT}Iw4x=$MYj1(uea?U6xVKYEpN}8zc)Z}I@d0$Se*P_r*4fqtuz z4Kx28sd}#Pf3(s6TAN{Fe3k#Cr*`+cLq|Q+?v@Hz#&^j2QY$9AMTda43AbxMFXJE$ zi#+>hHqB2PhM3dN0HJ@6*w~+Q&vqfDuRip@^R05@UyhOv+G0=DLJP`;rq2#0JlH!g zy6>wOL=4~l`P&iE9+gxKiQDn{HtZbegQoyZEZoQnax@liMr~>_=>@$322l_5X_D=i z|KQsrU-hd0f^Dl;y%ipD*`YpLdU(cfyj%ovf$FH-4oqMPQ^~ZLzcd)}guz@&Q)9C# zD$kqzEliXIct!@hkB<%`8mh5}6sWd8_*~>OZZO6Y&#_XSu&{Nc59i$gqGzG)miF!@ zU(rH52GH`ISFvsRX$TnJ*+OyK#qNj`>Hap1KjSDv7=Nch+9gG-?($;0OW&oC|8h_? zz|((J_0Ixf@S0j=K(C`T(w4$!MjZxLyd=82#Yn>@dQJ!{%<|rJ`HEixv{__4ioCiE z`XThnu;y?@2!=gWQL^jjV6 zD*l5D@EEX!+Lf;|aD_34*1SsAM7|BDDQ%K2i-k1G~L0bB)Fo=ML>gkIND;vP&&EUbv0m~*n~RQcTO*rG;2!70A?M)bMO^iNdjrYVEL8-SyAyFFZ?8c z6`22z^bscX&adExNa5NmcfaHn*|+29Gv-9appNu5+HRcInq!&#vK2e=QT(F{SSQg7 zdpAGHNT7C?l4ZJ4mX-6wf@G=)foC_%PFVnIV{t?q=Ef9_L_XdEkCEg^?74nDXYOq! 
za;8!rW3zuX{}VCt1MOQd;2ObK!ORMuo1IT#H!}mtCfz&@RXl#?o~y*l-&ZQhuxp2k z3-|iH&A#dYFL$V!_q-WgxHuODGmCuK_c+ILru5@XuYVu&IIf$H0&ao!$@AP;xz@bl z*OH+jk^4{e%z|Lb+XId^QfCX&?-`YcBttn!fTJmzRxUgdV6wJnwCOJnz2bLP#ti1JNC>i(&-~jeEtr2EsU5F&+Of) z2#!KoVGVfs;No~Sj_GIMvDte$?jxTV6uUwJuao|a<^vLBjzFyJr~UPV*%ILg;^Mo; zi0=Dtg|eX!T0jnzO?E&arJahr3{nOjZMKjXaIT!Yf3}btZFNL*NFv+aSOx^vVTw~7 z8^4$-SOwrExtlLOBm5QKhf&H+fnpe@&V7A-*5`kY`7GoETxvc9Y&Z+|1jWmph?lwXyf---q;`c#3YWHstBsb^*1u+El5V}D=~yy+^Wxt|;Ih6Q>ez=t*}EUdGC-)pdy5z|aqGTzPqXu8YzU{(h8%(x4R1+E*e z9a5m(Yr}=8-?m~~r+V9JmL@l1fvEFE$a=bp&|>zl;Mv&C)@nl`@f%^ZUpDh%S7ZDp z|GLgmQi7IJ?k~s*qpMj^2|6aNFDN!36JX<+GlE>A*{8uZTSK;E7NEb2_8|)5@lyXv z`U4A`32k|@EHfQ&*BYSmB8);lql5lH(r6%2OuR|z_hXMX`$*$NrxFjN~YL&K3Fm7u~sG4C4CV;Oq+LU^YCqtR*{b&)S z&GS;Tf~Ov$4SGGLYIEl`6l*P{`pMTD44tpp)Y|a;6Q;M!_0M{Alis4uNO1HJ9gTC7 z@8aT5G3B;cmsr>P*Zn}9{8SvUH$ZZ*R$|XL>AUzHdJx`vQR#DVskM2c>}1@;4Lt0u zdmIhEQy#T*?~HUgUbJI!h$h7qZXGO8A?e1?>7+!A`us;96dt(BS*-Lm3gPq|Y6}E>SH&$xL zcyhjHE`%^M{5md_mr+}pvpob7^)Qy-zd4#f5c1S*J@q{Zv~BDx+VQfX8O)?SJ=VSG z?3O1x5wcl#(UdRW0epqthtawOxu0EHRvwG{hI!bQ|91rLb?6!Se`(|wqW|YW>t8CU zyL$tG!hN4u#{sT=IYx(wUhnYQ^8%p4k-F9#|=5HVfIgAgmyAFI*e6|86r4v|e+VuGn=I zO8WA2IuT}TIz=plluCOm*BvVA@R_;mz(Hsm5+_>Led}|Un6(3t*NvWZE!9HJI?iL} ztzLKw`R-9q6B(&XvI9nq3Yx-&EMYa$2mBhjB8?R!;IvBl`N+00Tb#+fD2tY~^t({D zG4{c^dc7p&%U_tVCNJ!gi~r~q!J<==eHGDgsWf``a9=X<@e4}r^GuI!{X)r6pJK8J zU&hTJS}3^VrN15xkIc;j+}GMMI*rG;hwTmfSpDO>2SF|>@bX$Or>co|14Cc(&bUf| zW;uW~95g5eHM<|?@S+!94rZAgIM#QE4RALQA>EVc_2)Z>{G+Wi&P0=r?Co&>dr27a zELtm4cOAQqq4Elz80oO2%X2=y?_^_bUbKaEOFup3gd)3N+rJL*2G|{MbC;%GVb`#H zoG_QaFEYPw+OQ3FZcw349?J!e>)&S-vHx(n2xUDm*YXAQxVjhmMT`Yg$zPsEOT(;g zp9k7>A~OVDT^2%jE6~60bw*3`cj;T*%Ln&lbY@VDVg%FsA-nZcGsCmJcIWc}71#TI zb&89yL`XuZmo}|XPaOmroUXx8d^L5f#)VD^+S%P~;iOq2ts}CLUp0bns|P^D=iYH2 zG!m^9is`j|-QWDD@3jH^_%`!nKC_d>%GCBMA2AiY*ZkY=V6$4+>lF9j?^p2GBuZyT z+0G*971UfLw5&%xL8cumy9*JR-*TZx=%tmxhK2fhk1sMZ+gVsXO#T!Q!Orf%EZ7sZ zn$~k6Vk860m-l_NN6{sD6p9=*;IzsuJARH^8bj_B>To8trb@()vNn&)hH{&YR_lmR z97uh-jWK}OC)PvMUh9ju(h`YF`Jd8_Zf=bO7LJdKn=(`&?#n;T6(rw)KN}x|ydv8)~SHtqoZ|+@pyEH-wLvkq1eA>Y@*LAE8ECE-6JT@wQLGmb*hOB6v z-0eeYl(>4MPgPj7R+1r9(|pnMI9=&*N5rd9g97%6@Eo90slnpfJ(7&J_?G*BOt1e} zPk?i&EH4P?iM!Kpt_Pyq{tp1nThl$#Y>{Ipz&+v$_n zI~xg7oJk~w=Pevso88|AZWn;A_Rnm0tVd+o)MO;bWy|(3d<K)_|AfnOH}F+ zfQ6$){xhB*z}dT`MBTi_#X)nLmr(^wpjjyEb-#e3MQQA5$nJ)WHVb131|%I70ty3N zMvZNR=!;iA)7+jjtSr7=oa;Y^!~j`@_VLsUIW+z{0ieqrcg2qu{7*ur)!l(bYv#4# zw^BgbyZNcB7BC~?=ksg=Q9k^{kGwfn9%hRtI~D}0c8vENR0l6 zOPI$HLBYVyyl=L9*k-l%c@{@_;#NV_S`2`+VYDMCB6R`u+q+WsqTZpvEy#s&*zi8! 
z>_lgHQ2`mFQgF0KeRldA%2~-mKTv-R@r)nLf_^YX`FUByWA#CoPaE9gM9I=xBd|RU zv6t1gTM7O&athBQvJ@FIli)yP=2sD|$R4g>GAzT*<9WEyC`sr$p^SIy3m=?bH)o%9 zx}^*_#}8OxSuB-!Sv@g1&Iwn~P6VNX`!5ZadKcc6?aDK4V^iO4$O20|ne|tT#OBy= zh&VfSAXM~Mxr0^@3s%uXh1|>x?l0?me;#^(8$u?Sb8mDi_=S)TTV~D6v9>0<*lraX zjYy*sf-ks0GH$Cmk?l^~W0fj;&m*u3lYJq59*HY8Q>Fv(!SIyb-WxkfS0*PG9vYml z&1vy9(j%$#>*ZO$jij_UE9=o%TXrzAPm;hPsQF=I0+yQ7wc5ojb01DF_6`0)UXC!h z>}L@IopqO%*O8{nJs|691anMS52PEhpzl=a(L|?-k8sJa7WZ}E1w0!QIux%PSQZWi zZz~Va&41GQ6f)71@z6AEpQTofYi=fPtrXwb)vD_~X3ni!rbq1m1ong4!-Qhj#gToO zl!Ir+KM&hdu&t#$qB`fgyi==IFCyXPq&fB~BV0?}*WqNr51hvvft!rvEY>g?B{MLC zfh+)~nC6cmy`6#S19r=;OPMSz#d}rPx$VH|AQt4{l&0VH_|Bn;&W^hre zl|3bh3lHtz?W^DEbhvV-v%sE+@l_APXD|Jc74>5PB2GrEeFt|n^DB*QQ&bo<4f8F< zY3m+DP`Qk@`Y!)KY?hFNGWYN2Zx_1~wOAgT%Mfhs-gnEAzxs=7aewud7rJ3Lc(m(; z+JLDg5J~_0TLb$%bY51M{~ElG*fGUX0x~%azO=bXs&9F3uR=w8VESsA#Jx{jM|jd~ z%75_mgnJ?!p(Gr-vg1G2=Eb?91^9|a`E8*yETBNT;ZxDttU(UB^QGtl*`VvUh<2Wc zdKS}G{#LoThJ-5hONpWFHCqM*WSd6qFUn$;CV&7z4o(2iX%UOJ{pF2xsn@oS-Eqp0YBs+}ju2%w>(n;*f&RfYU2W)BGg6|0SLb0rOE}Rn7Gh;<`4Ju?7lYlDH@JwnA+4i)jmJuD61 zuN5qiLXgI!hht~#QH_-*{+X_a_1Ur)bf#qb*-H*)7tGLvljaaoh@EoAtgm(V<18kL zS)`gV;3EeSod2T-*3%{JB`CyXG(x%9e*y>$aZ;hJMvud#X z`qEn?yG2f1oI0*pu*iRo?XwzJGo&4V3rQO4*8KJ~D9H~0sm%W|!~0*&!9ittl1pKf zoA+nYJ+1t~;+c-XEZPwU7A?~?b~)=&#f0;PCxJ$iT+?^p#^R@AqI(20Ar~?WAHT~x z_}|dYGRxgcrlN}#`S6;HeDC`-O2k&I5Q*_wJ?R6yYLwb0>`naP0y*0}maLEgxne#h@NgIS?*;P<2Uxu2O;`kDEklU_kB`RcB&>Ma<>&Nz(C z3ou8Aec}qftb_Y|_*blaV-wzW9qm(qD)WH_!6uX%tN)o!MTZZn{grX2^X&sRqU-j_ zh^xPC0Zhu5ncKU4i8ITqworGFcc-7NPh&NtrfKzjL&R0>ym!@$$=w_+PSv<5-IwxM;=V zn>h&~zqxFJg5`-$O$Y=x$|~hfte=*azux0^PEP?rU)K&KHrlFWb|gfO`;5O0fowKx zriAdSEP29?bsKK-J~ZBu(lZqJ9v}9#4cS0Dg^7gc-1La~Yi;s=@QwcJdboEiiTZ2m zuxheG&(yeLA^H}UKA8=ujsnbjigvX}$djP|@p25H_fqoAm8BLAh~N5kebRco*qE6w zFfp~3e!FK^;ni}=0$A$I7%483AG~?R;1h48h3XZ~>0r(3W^5e|s$48N{|jwdl9Oa% z(A;jJm^9BA+VIjJkz_Z>UXo_%Tc~K2WLLPRKC`^4XT1%|-Oqazp)mH=vf|R*95lG_ zh$52~PL5KY860+`MQMMie45o)@40smiEVBFG67I&#cvWLtM(;A`1ne?C}X?gTau3%$HwJwDCB;z5+C-**=TcX+8`=d8EI z<376UAc5L$*!SO8nQ${!p%m<0SM6of>6rt;LMV^SPDgY1+tbr!06<$}UV?bbeN3-& zR+EXBi}qT<`NL6%oU+%iVT$broXLaFhF}726!8xt$7M#6W?OucU~BD`eg)-A&bUc? zClr_uOg-(`w9mB>|IqDe*pJn#93v5DAraU056&)|w_=CD-EiC{|25vJN8ncfiE8*x z=~*c_jB&VBcqDJK(7<mQQx!*)7Y0qz-J&2TXSH3)PG9{qLUj>x*k9vuTeW3mkk|(0d`#_%bj?3EG0A7^Sm1M~HTiarL+JNynmdneD=!!SlyXrRwucvp$%j zuyOVocrDGY*A+p1Te3z<9ehL-r?0)7rJU=usMJ-GR8BT18gwp9fvm2QGHJ> zOpNT?BlJ&qD@n4gG3fwxbeT||h*Nwi9ct9rxYXaoLJ3UI6LrV>ji*2%m9kvI1$8s4 z7#0W1d;9;Yd#|Xb)^=@Jmo89giZrQ9QF@i$ zf`WkbqDb!oQbLP#0xAL`5>$E(y@(;ygpSlm4GU#4|AWs#YD%c}t#xPI-W zT-r2Y=$&Cu(A5yL*ERBdtONnTP|7}js4hAHbNn!)YJ#~X37^| z$nTY86YY+D$)|#nhR|9yhTBCjjckLMG$$7l z-1QT4IP_0E$n*y_*fo-^Cz{|!Iv1k|s|@}F?e>3S5F{EFv_`yLeL`rgja3|g;Yzt+ zusoL5X6?rMY+x9WFTryA`-;NowoyT2?fVe31UFH`S*_NaYw|)_HWh<4y}Ztj+s37* z-;_i8!;aTebHfmYDo_<#f62Rbl?F8XP3dfFS5$cRza=ZWJA+gE!f9~%CYtc{($8%- zUHtykjBOj^TFT8!oobnqsng*af!=aSV1`qvqV9jN-(PvGu3_N8o19G~wlOe4X+HY# zVV22f$I9W%n2h)Dxdp@rO+1_T*;^9`kL}Wh_Tt{WpoP%oU(__cgYHq91zh6z*}G~B za*Z!1Cu_3eYz&XRvX_J%BVUzSIWLo(oLB+N0hs3`X<9$SY7!-+wVCSD`m*Y;L)?>i z?=nsN@#T%|5fw7eyGHrw z&yBSq9DSmxCp)co6Q0`C#W#rJ;trdG65mePUPGBPsl!gk$`amI@JIWGtt5(+Xo|q! 
z*eb3uwUWFTCB181)cZ5<^h9)JiT7y?@XpRk)0Y$%=X!xSKotdaIqLY;`(IsJ>7t_m zS8Wqd?x%K6atSSD)mLeU-=||ZZFE~YW#JS-$t2uXeP66+%7fouxXbf?cT8KKzb&#V z>v?sT@gts?2IO$)V&17_sy;1#da37@+iB)4(5hGBphTPaAimGu)_9X@sC|=V2z$br z{5(JA`GfZgr+sw|L;Fk8Atz&&1K!dnyGbK>2Ey?7^Y*({exA`tsVlt#J_64MFvRPH)UGQr#cT1fJ zZn$DcibJNEzm}yDY4+Vgf=3w^XT75Raz<0U()#MD(%8h0mKdLwdc;7OC_w;iZM=F3 zdbW7>nb%D4{DsQO=}KwSAu9CgFHie`5W$q@yAe112P58jRC;mX$|WyIidgaaOIIVg zrYP>`;GWEE;`EGYsc>k$%bS{g|Hn_O{8!mdb}OaJUdiyF(JfKYs(Zvtql*$%$VFxm z)$+Fll>oYRy=&Ot{cT?;Uld(3(u#Sb^Ve%z(W}e1A|vWtY_n3L=3xh;C@qvLkrS># zcR7k3rrxge4yWmx)y^U?DrB4rQ)U;uyKo>pB*w{+~Gf{}S0L2XLPBNCS(ct!ErBCWzio8cdy0 z2C{C#Hn7hTIQe&0tq{&U(QLU?qa}NJ0a(x~nb&A;6r$+dv|w5`Bpbx%rn@}+Td-8o z!X|nbTLGr&Q`kH@ETG;#n)(D>+fDoVGoR}b7gasX%bCmBIgf8JwyJ=$;5KrM-Lt8p zl~;0W!)CQBaZ+p6Vp=opxs%NoAZm-)&Fh@XvUape=8A~35&2^H?wmM?P5>P&w&eg2QH$OylOj(9T z5lb=wU7x5u5dXymcvDV;o0~vDc7cQCa@#?uEr*$j`NYB_a|&@R=IVt(gqvkzm2|qY zWX!pV>%N=*r88=0^-Ubl1TGSHhUA>4nthXZRuJ6;8{c#bnS1@I`g`^jyA4XnSg0fF z2yikI;fuvy-)~1k4-0+5 z8Z@$a;Wihq2#Uah=11J99Ykx()n(Y@c+<|;cMru9^%o~d`%!B{GOGD1_d2T`cQ?$x zIF>ij5WW>XN}68P2NJ4y9>|KnLP#zR%z3A`H?IRj9J__fw zE6#VCUtBPW95ob|z&vdDk{vuoH182yUQj|}6hCPNluStuHon{_x4YyUn>}2-ywztd z`@=Q;p^~aXTmNpw*L}Bn=%p>8@km#|&HY{)39IjkhIy!B8-|cnd1Cuxs66F!h3^&S z8d?2!MO=6yt{1iPfES~N_DdqpBcc_(#J4@#_BpbN!g9s|fq2oRa_}bYL4P_l%#LuP zB`k0syu*i3Mk#i41zqqP?_E1Zl^7^<@fBGcx zNpk;@O++{|HlbKUlC9DoGGfnYBI{kUM@Ak4nq$Jm=k}D~P8Tf!QLTn8yX*IwFF*Ab z$q0HyuvSpN5X{4P>k8UMR*Y zuls~j8++iSqH?Zd2+wOs6UNu-s~Z1a#vk%*TC1L{;*HE;`P4%L`s6b=S?vfj!Nsa7 z+jjtg_v&!bd!@o$o1MJbRqal_DlsFTi+49KX zXFlfvGmfY!q_%2r!U;}aZAB(&?Cmmz_%cIpjI?mR(9+c%i<#+M?ds(AgoFqL}k{FzYiGl4xPxc;5)Nx3(NHb zn)&upwx!qLD2O6_Y&+-2z#f101BOrI@hx#2MM*+6yf}URn21FYh?JoNj+NPcw1iSX zE}#5geahioyQ65UT)Grwh3q^Z`K~5)t536g=;mdPA1lsk&fX$}WFK`s)xBq!%9N7~_ZZ_1PNDcJDmi@fz zQvFcy#y5=7mXeNbE!|2&W7P-HZyez_#&x5WI=4cpe?myKnZayN3?>x;9~9b~_9QlN z&ZByk3)eSWo2b(VTNRo6k2oT;yC);aI}|a2(tuf@8|9dW4!R|YiL0_ITL|pl+yn|> zrR^32_GW`w`~Ejmiq!FyliV?(KOB@ZG)L!hnfvCC7_DoqDYIskr-S>6owm z&x!)^xO-pa1GcF4%BP*DHv%_%=+|7uJ*9w6Xvbdr!*wJ2TWXw<(J`du6hU;*trp>R z(UlR?j-b+?-$@#zqZ?!02?kFZB%Xkq^NyVAx_VVOLFyjAgsSzkCPVVX^+n==chKJ;$~jN;kG(oj214pU*Srlo=k$d5 zcH3buOufhKV*E$ES6G%Zbx5P{5Y4rL%_(V!F}j#-ps>nNZKckU*!pIpEj*frjsI@& z8Se9ncVQSOF?rBb0A(@>{h`s2U!_cA+AbRc?Y4F=Pf5U+*Ee(ms85Bni>iV6`7OX| z;xJbAnT!aO@XV=xs$)mx@#7oIjVrDT!SxUe8%?{^vYTYI!tuh+(0;^G z_Iz}s`%N4ou&@opb&n0b!!~4SV?ov%AxS3p91Te(2yLDEEiX|7xS8-Ux^*flzPzk2 zm{4s0WU(CF+WwFdY^EK+~kYbXDOL(TsVh5LqAZ8mP9DmTwhMMN@|NK@u7En z=`)BMJ}In!51QN1AaD42)YJQMHul#2ouxXp%NQ1_m-1HRK>uOR6T{oHRFdfqwn=Ux zASmN}QA1;&vSf<)LYCvc(VR$ylT~xog<_Jb^gHnA%M;GehqXBQ0e-(ieej9J3eR4z zd-Q26Iz+A~JHs)KP;jA-*Wua&a2SUj63)@>H~sw;rZJcM2Q z3P38vHF&&XBfS5#oJFJv6ZKc_v6-`R_zzkMzV8h)J?8`IV8f5xLj{nGv{6Ce(|Ds6$^ezy8 z_TV8`0W!W#)s!M50Hw=3A=&4FX8nxfSX~vn|1GKS6=@wQk|N|~B^;_XsOq2W$4A{O z-3t{{Z}|F%F1IS)a+Sk7H>KV}o@zPmX`^L%wB?c|^SNwNq-9^gb2N{WV*juf%zFVh z?a^ex<(|jO(2sa}K7K20(uZ}8)P6c+&F38n(-?ys@W|6HKV#oN`L=S`$A0O`m;wu? 
z)cvG)LVwJp$5H-m$mwa`%!?;P4ilo`&Su zDwS;V>4|Y*V`~(<_4a4SR_$0e_aE34BiwxU15ub8$*s>gR(}1f%^;@N0r|aB();6p z)=!uW__lrcyS*p*EOl_p0OXqxZf5DKC%=aKD(t%9`z;4YZ7s6 zomP+fggHDWUCaB4M+%~3gaB5+fFFWSi|aO4f+hTDHxAo0BB|ec3T3z3)46pxtR<4% zeagng4`o1iwxVCBMHsvb?gEZTNn0HuW95530AKu+_x#DGM?`}qPpAcC{S-J8`+r6t z1Kt4KrP{LcY|n6oaUipTXL~P@3CBWrDlfHu)I2Q?)_>i&T?b<(_x=bSn?H~GYpi>6 z0T`Ngx)j`J_N@w;184yU!IhN$fNBxS)c#I*yisfTv5Wx9v(GMuZx}6{{7k%TPxucn z+cP$^IGWYWucVGpPvJgrajb$XMTAdB&BwE59diLyvzh3cY~Ls!d-ct+j42*3%$nZFt!vW?jDCpJ?Y4o?%y82@QEnwrx8jMqKg7R@g=ES`kJF zW3qzuqOw#Rbk^%^-r7#bb$ndg+M*=jnIqm*~?<2Kxv-BM+%yW1z(;V** zhNsFwMz&LpqYV{ik^$V|XZHkfB9a-?e}ftDbdYXj zmXC?MinspdA44tamBT~9Ty4Yg`lU@S4l}hXWVFTneea@+m=S*YZygQgt!lwiQ&FHq zC#iYZCzy@2UD-?adAS@%52+QTISivrkoKXSqN-rsKXzNcaUu3~4Mp+m+AD=6l>%J| zspCyg|1)OcF8aetpj(^YEtC|%j7%T5nbB1A-MF2Et#ir~mYJba{*7FP4HI&jF9liy z*p?Z?xsVg4?r(W)?=IgRq32ex!UlYkTd&U!31vnSyiB*B|It)eV$1|m(@fo|Y>lyM zUYho;<2k-R^F4L51Nhx_o==Repk60(-^6v(E3e2gDda8Ou`3{9D3SL(> zzJJV-I_p{ygQ%v5ECl?qrpNaLtR6FP^&Cm1PvpOPdh=sM!hze%Nm(x-4X`}=anvUu zN3EL3>GYjno3edude@w$3>R3YbFO}W%#s%7*z{5BLK6UtgH+9y2=e*kEPH&z_Y~td z@AElN1A{(bOz+!B27IY|Z{>G>1?kZE4RDeE@ZBBva}yUWD%X&hycs3Qo^>}4_hJrY z^0aJI3Cn&WYRoWQ+6!mER4KE9fCpBXS%^(K>;n3q>VGU3AXE%2B1YAykRvCSw{%># zb50n(s3~|k`J`tQCV2yA^*7j5|H|@|o@y{6TGCw;sYi^ub z*!9~z61yyQvi;(QjYc%Alq^EfUwcgQv1rX=4t$!RtoMddAWwe!*$$8~wsc9jv_*lqyp1GJoN zZ#Y?Cs~Ci2Jrwldi>sDbir%?xlSg`m!5a6m)c7_7&W>VoH{MMAwRmW5)tc(*?KITu zTn3ETBILw0&R=#?R5mlxJ;q}i0XH13wK1raE;|(^ts9OW+JJ5lwlsa1`(oJ;v((V^ zb!+dFw1gFxZKaTUjrq>gldZ_cxvO7*QXN4sw*#q)j!++_zG+@oZ2!GYxf{@KkFF(< z?}{4X1b&HWd*9eD>506K;Ug9ljxVGL8Ow z);v)DDVJ82jEjcuGkZ0uMxsZuN#Eh~R8OghScb4ONdaj1_v!Vsd&+$~N)hn@b^oNM z9|8|&%b4?DMaZqWYAw*yeNSyKE5jQdt}C(THXA?nF5R*nU9PAoVAWQR3%@=ka*UqT z9?J?`9;WWBtup+aQKA`tOdbmuZpt?;tVSy{KB!psE=U9HpPBE$bi5@LfCu&DxDS@9$c@v}=qRQpGp3IID&2l+E zl5NjBSC?fazToA4gw%@YCqfocYGzN zWc|#NKL@t71h$Ik2+BNu2Q(SoJs*?jZUO{dQF#c-rszw8fnnZoeRUgeaRj!LA@MSS z)!qgc+hFfPpU~Z+@UL-v$5r*J|EBDTdfhZrVB$dJbIF9L*X*diUTTV^M6lOTC4gJu z_)54};z{ku6H@fYNc=we+Mjm9y(5Jf!v$q;#IVPoQH$M`Pu+(@S|Zy!fPH_P^5`8~ zCxGw%V@Q!ep-h2;A5>aoFN6yo?~8v77XXX8Pt_kt(}rC9 zVV5L9`E2q5EaZg^j0qKMHuX)ZHxN#eNi^Der()iEIsigkhoI51xjHlHT>8PjK%i;Z(s+ESFV9nN`3p;%nVg4%cTM_s8^C#mBx2LV@3a@Hr2QOV-!poB=l zG5P7n;=1lIYfC+zUNY`vKI4`QBqxR)^)C1W+nqIcyNiiOrJ0Jp*ZWY?uu~^=fvdI` zTfcb3wRw+Ys$;yl*q;5gMothTn7)zbcxeCRjB#vA_EwmkLg1ag3N0Yc(N-Ee{}dWo zMv>N?XC!Lc&rXLew*v9<0X+5<4LJHId&=fxH*=6Si`>mIKh@(!1`2QxRtW;Khi3ROnQ5Vz zgBfXam%q8^lV1&16r`A^CTpEh0(^&WsEH!0Qlm{hasN_CNIh!2<4v>QX26LH33^)z zvRUn*_DP4DW+!vdxyM9Hy-bABg#mC5bIf;Ea+6kyYOCI(p*kpw^rh0dp5R}v=Q#Rx z741rHb~zyPBC*&tK`kaO~#`h zn(|d+CXA+mP-GR*O5SBd9^n008)(|pv^iDb^XArGrL%#5h19(-BzGWPUxISjtv(EP zuCPrXFGWvwJOyu0@hf3jhnB2U>_aZqQ9cGT(uQAO-ommPhA;RE1nwI*EAKOfP;GfS z@8%f|75+Mc%v9zFGU7SEoZxi6dQ?wKZJg#~JZIS}o0r~z+Pamk zXzzmF-z^Ag>1>kdAV}#C#HFa?(?d_X%(=A-eZ=A(mCXfgZE7eio6LjYxp|A~M&Xw) z7}Oeml7p`j)@FO`g}HF~t)C_{g1nrW`}TNF#dBu>hPE1BpMdsOlj1^!LE9EQKlp~Q zDZQ=N1V7h>1}6&?CFR{aq^KxfYFrJKnIJ~=xAAO)8N%Nk$~WnOHUiEm5`?pVD&WwI zZ3@$*tk=ota6)+g{jTUr1J>rBgz{MYVR?&V2n``>jf31AS93J$p4( zK)a+$6%7uh?<>~YQS|$8obp(_@8vDr@fl7UWv$9!`s4Ou$V1pUi;>?B7@3Y;UlG}> z8*2{gxP6JR>_)8JduG?QeRYW*~QghJC(cG)foKXA$> z9)hnR?*K03z3jMD<^Nt54__%7+HXkpraUH}*PNH_yk&o>0*;3T4$kY!>yx)8`1T{M zIxa+5jCG{I#8-rncrH@-m)@0Kw;5SZw^7O^lFTgTClA$OgGZ2WRnggqO-y84BalIQ#?~QEFG0rSb+7W}e;C$%}JbXisEg=tu6W#wnDq0InRy1`IXelPHn4 zoP6agYbf0(ZD#TF(r;E^s;{t=v)1@TMm4S>yI+n@C1)zQK}io%FA+)F&-;r@KlJRM zzT6)%I|2?!1GJAlZNiH>PR?>5;`>dJ?-Fb)Ek)NzGW>>)5HB}Wk&-M&n{Yk%O`L!) 
+    --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results (no modification needed)
+echo "------------------ Final result ------------------"
+# Output performance FPS; review per model (field $4 of the TimeHistory log line is examples/second)
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+# Print (no modification needed)
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review per model
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep "Test accuracy" | awk '{print $3}'`
+# Print (no modification needed)
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case info (no modification needed)
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data (no modification needed)
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration (field $2 of the TimeHistory log line is elapsed seconds)
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+TrainingTime=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $2}'`
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_acc | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration (no modification needed)
+ActualLoss=`awk '{print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1`
+
+# Print key info into ${CaseName}.log (no modification needed)
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..a280282b1
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,181 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+
+# Base parameters; review and modify per model
+#Batch Size
+batch_size=128
+# Network name, same as the directory name
+Network="AUTOAUGMENT_ID2891_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for single card
+RANK_SIZE=1
+export RANK_SIZE=1
+# Training epochs (optional)
+#train_epochs=3
+train_epochs=10
+# Training steps
+train_steps=
+# Learning rate
+learning_rate=
+
+# Parameter configuration
+data_path=""
+############ Diagnostic parameters ##############
+#precision_mode="allow_fp32_to_fp16"
+precision_mode="allow_mix_precision"
+# Fixed parameters; do not modify below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=${cur_path}/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+auto_tune=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file=${cur_path}/../configs/ops_info.json
+fusion_off_flag=False
+fusion_off_file=${cur_path}/../configs/fusion_switch.cfg
+############ Diagnostic parameters ##############
+############ Diagnostic parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+############ Diagnostic parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage:./train_performance_1p.sh --data_path=./datasets"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+############## Copy dataset ##########
+if [ -f /root/.keras/datasets/cifar-10-batches-py.tar.gz ];then
+    rm -rf /root/.keras/datasets/cifar-10-batches-py.tar.gz
+    cp ${data_path}/cifar-10-batches-py.tar.gz /root/.keras/datasets/
+else
+    mkdir -p /root/.keras/datasets
+    cp ${data_path}/cifar-10-batches-py.tar.gz /root/.keras/datasets/
+fi
+wait
+
+############## Run training ##########
+cd $cur_path
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 -u train.py --epochs=${train_epochs} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --auto_tune=${auto_tune}> $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results (no modification needed)
+echo "------------------ Final result ------------------"
+# Output performance FPS; review per model (field $4 of the TimeHistory log line is examples/second)
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+# Print (no modification needed)
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review per model
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep "Test accuracy" | awk '{print $3}'`
+# Print (no modification needed)
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case info (no modification needed)
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data (no modification needed)
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration (field $2 of the TimeHistory log line is elapsed seconds)
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+TrainingTime=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $2}'`
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_acc | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration (no modification needed)
+ActualLoss=`awk '{print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1`
+
+# Print key info into ${CaseName}.log (no modification needed)
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p_static.sh
new file mode 100644
index 000000000..e36095992
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/test/train_performance_1p_static.sh
@@ -0,0 +1,183 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+
+# Base parameters; review and modify per model
+#Batch Size
+batch_size=128
+# Network name, same as the directory name
+Network="AUTOAUGMENT_ID2891_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for single card
+RANK_SIZE=1
+export RANK_SIZE=1
+# Training epochs (optional)
+#train_epochs=3
+train_epochs=10
+# Training steps
+train_steps=
+# Learning rate
+learning_rate=
+
+# Parameter configuration
+data_path=""
+############ Diagnostic parameters ##############
+#precision_mode="allow_fp32_to_fp16"
+precision_mode="allow_mix_precision"
+# Fixed parameters; do not modify below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=${cur_path}/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+auto_tune=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file=${cur_path}/../configs/ops_info.json
+fusion_off_flag=False
+fusion_off_file=${cur_path}/../configs/fusion_switch.cfg
+############ Diagnostic parameters ##############
+############ Diagnostic parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+############ Diagnostic parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage:./train_performance_1p_static.sh --data_path=./datasets"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+############## Copy dataset ##########
+if [ -f /root/.keras/datasets/cifar-10-batches-py.tar.gz ];then
+    rm -rf /root/.keras/datasets/cifar-10-batches-py.tar.gz
+    cp ${data_path}/cifar-10-batches-py.tar.gz /root/.keras/datasets/
+else
+    mkdir -p /root/.keras/datasets
+    cp ${data_path}/cifar-10-batches-py.tar.gz /root/.keras/datasets/
+fi
+wait
+
+############## Run training ##########
+cd $cur_path
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 -u train.py --epochs=${train_epochs} \
+    --data_path=${data_path} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --auto_tune=${auto_tune} \
+    --static=1 > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results (no modification needed)
+echo "------------------ Final result ------------------"
+# Output performance FPS; review per model (field $4 of the TimeHistory log line is examples/second)
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'`
+
+# Print (no modification needed)
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review per model
+# train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $11}'`
+train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep "Test accuracy" | awk '{print $3}'`
+# Print (no modification needed)
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case info (no modification needed)
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data (no modification needed)
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration (field $2 of the TimeHistory log line is elapsed seconds)
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+TrainingTime=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $2}'`
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_acc | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration (no modification needed)
+ActualLoss=`awk '{print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1`
+
+# Print key info into ${CaseName}.log (no modification needed)
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..d8a964d5d
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/train.py
@@ -0,0 +1,302 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# import npu_device
+# npu_device.open().as_default()
+
+import tensorflow.compat.v1 as tf
+# tf.disable_v2_behavior()
+from npu_device.compat.v1.npu_init import *
+import npu_device
+npu_device.compat.enable_v1()
+
+import os
+import argparse
+# import numpy as np
+# from tqdm import tqdm
+# import pandas as pd
+import joblib
+# from collections import OrderedDict
+
+# import keras
+#from keras.optimizers import SGD
+from tensorflow.keras.optimizers import SGD
+from keras.callbacks import ModelCheckpoint, CSVLogger
+# from keras.preprocessing.image import ImageDataGenerator
+# from keras.regularizers import l2
+from keras import backend as K
+# from keras.models import Model
+from keras.datasets import cifar10
+from keras.utils import np_utils
+
+from utils import *
+from wide_resnet import *
+from cosine_annealing import *
+from dataset import Cifar10ImageDataGenerator
+import datetime
+import numpy as np
+import time
+import os
+import argparse
+import ast
+
+sess_config = tf.ConfigProto()
+custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = "NpuOptimizer"
+custom_op.parameter_map["dynamic_input"].b = True
+custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
+custom_op.parameter_map["graph_memory_max_size"].s = tf.compat.as_bytes("16 * 1024 * 1024 * 1024")
+custom_op.parameter_map["variable_memory_max_size"].s = tf.compat.as_bytes("15 * 1024 * 1024 * 1024")
+sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+sess_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+sess = tf.Session(config=sess_config)
+K.set_session(sess)
+
+
+starttime = datetime.datetime.now()
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--name', default=None,
+                        help='model name: (default: arch+timestamp)')
+    parser.add_argument('--depth', default=28, type=int)
+    parser.add_argument('--width', default=10, type=int)
+    parser.add_argument('--epochs', default=5, type=int)
+    parser.add_argument('--batch-size', default=128, type=int)
+    parser.add_argument('--cutout', default=False, type=str2bool)
+    parser.add_argument('--auto-augment', default=False, type=str2bool)
+    parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                        help='precision mode, default is allow_mix_precision')
+    parser.add_argument('--data_path', default="", type=str,
+                        help='path of the dataset')
+    parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                        help='whether to enable overflow detection, default is False')
+    parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                        help='data dump flag, default is False')
+    parser.add_argument('--data_dump_step', default="10",
+                        help='data dump step, default is 10')
+    parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,
+                        help='whether to enable profiling for performance debugging, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+    parser.add_argument('--over_dump_path', default="/home/zwx1138188/tools-master/precision_tool/npu/debug_0/graph/dump", type=str, help='the path to save over dump data')
+    parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+    parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                        help='use_mixlist flag, default is False')
+    parser.add_argument('--fusion_off_flag', 
dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str, + help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, + help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest="auto_tune", type=ast.literal_eval, + help='auto_tune flag') + parser.add_argument('--static', default=0, type=int) + args = parser.parse_args() + + return args + + + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + + +args = parse_args() +#===============================NPU Migration========================================= +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "L2":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist="../configs/"+args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file="../configs/"+args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + 
npu_device.open().as_default()
+#===============================NPU Migration=========================================
+
+def main():
+    args = parse_args()
+    npu_config()
+    if args.name is None:
+        args.name = 'WideResNet%s-%s' %(args.depth, args.width)
+    if args.cutout:
+        args.name += '_wCutout'
+    if args.auto_augment:
+        args.name += '_wAutoAugment'
+
+    if not os.path.exists('models/%s' %args.name):
+        os.makedirs('models/%s' %args.name)
+
+    # elapsed time from script start to this point (setup time)
+    endtime = datetime.datetime.now()
+    TOTAL_TIME = (endtime - starttime).seconds
+    print("TOTAL_TIME : ", TOTAL_TIME)
+    print('Config -----')
+    for arg in vars(args):
+        print('%s: %s' %(arg, getattr(args, arg)))
+    print('------------')
+
+    with open('models/%s/args.txt' %args.name, 'w') as f:
+        for arg in vars(args):
+            print('%s: %s' %(arg, getattr(args, arg)), file=f)
+
+    joblib.dump(args, 'models/%s/args.pkl' %args.name)
+
+    # create model
+    model = WideResNet(args.depth, args.width, num_classes=10)
+    model.compile(loss='categorical_crossentropy',
+                  optimizer=SGD(learning_rate=0.1, momentum=0.9),
+                  metrics=['accuracy'])
+
+    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
+
+    datagen = Cifar10ImageDataGenerator(args)
+
+    x_test = datagen.standardize(x_test)
+
+    #y_train = keras.utils.to_categorical(y_train, 10)
+    #y_test = keras.utils.to_categorical(y_test, 10)
+    y_train = np_utils.to_categorical(y_train, 10)
+    y_test = np_utils.to_categorical(y_test, 10)
+
+    if args.static==1:
+        # truncate to multiples of the batch size (49920 = 390*128, 9984 = 78*128)
+        x_train = np.array(x_train[:49920])
+        y_train = np.array(y_train[:49920])
+        x_test = np.array(x_test[:9984])
+        y_test = np.array(y_test[:9984])
+    print("x_train:",np.array(x_train).shape,flush=True)
+    print("y_train:",np.array(y_train).shape,flush=True)
+    print('=========================test Parameters =======================')
+
+    '''
+    if args.static==1:
+        train_ds = (tf.data.Dataset.from_tensor_slices((x_train, x_test))
+                .shuffle(args.batch_size)
+                .batch(args.batch_size, drop_remainder=True))
+    else:
+        train_ds = (tf.data.Dataset.from_tensor_slices((x_train, x_test))
+                .shuffle(args.batch_size)
+                .batch(args.batch_size, drop_remainder=False))
+
+    callbacks = [
+        ModelCheckpoint('models/%s/model.hdf5'%args.name, verbose=1, save_best_only=True),
+        CSVLogger('models/%s/log.csv'%args.name),
+        CosineAnnealingScheduler(T_max=args.epochs, eta_max=0.05, eta_min=4e-4),
+        TimeHistory(args.batch_size,195)
+    ]
+    '''
+    callbacks = [TimeHistory(args.batch_size,195)]
+    '''
+    model.fit_generator(datagen.flow(train_ds, batch_size=args.batch_size),
+                        steps_per_epoch=len(x_train)//args.batch_size,
+                        validation_data=(x_test, y_test),
+                        epochs=args.epochs,
+                        verbose=2,
+                        callbacks=callbacks,
+                        )
+    '''
+    model.fit_generator(datagen.flow(x_train, y_train, batch_size=args.batch_size),
+                        steps_per_epoch=len(x_train)//args.batch_size,
+                        validation_data=(x_test, y_test),
+                        epochs=args.epochs,
+                        verbose=2,
+                        callbacks=callbacks,
+                        )
+
+    scores = model.evaluate(x_test, y_test, verbose=1)
+    print('Test loss:', scores[0])
+    print('Test accuracy:', scores[1])
+
+
+if __name__ == '__main__':
+    main()
+    sess.close()
+
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/utils.py
new file mode 100644
index 000000000..acabd2071
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/utils.py
@@ -0,0 +1,43 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import random
+import math
+from PIL import Image
+import numpy as np
+
+
+def str2bool(v):
+    if v.lower() in ['true', '1']:
+        return True
+    elif v.lower() in ['false', '0']:
+        return False
+    else:
+        raise argparse.ArgumentTypeError('Boolean value expected.')
diff --git a/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/wide_resnet.py b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/wide_resnet.py
new file mode 100644
index 000000000..ceddfa2b0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/AUTOAUGMENT_ID2891_for_TensorFlow2.X/wide_resnet.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
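+#
+# Pre-activation WideResNet built with the Keras functional API; on NPU,
+# Keras Dropout is replaced by npu_ops.dropout wrapped in a Lambda (below).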
+#
+import keras
+from keras.models import Model
+from keras.layers import Dense, Conv2D, BatchNormalization, Activation
+from keras.layers import Input, Add, GlobalAveragePooling2D, Lambda, Dropout
+from keras import regularizers
+# from npu_device.compat.v1.estimator.npu import npu_convert_dropout
+from npu_device.compat.v1.estimator import npu_ops
+
+weight_decay = 5e-4
+
+
+def conv3x3(input, out_planes, stride=1):
+    """3x3 convolution with padding"""
+    return Conv2D(out_planes, kernel_size=3, strides=stride,
+                  padding='same', use_bias=False, kernel_initializer='he_normal',
+                  kernel_regularizer=regularizers.l2(weight_decay))(input)
+
+
+def conv1x1(input, out_planes, stride=1):
+    """1x1 convolution"""
+    return Conv2D(out_planes, kernel_size=1, strides=stride,
+                  padding='same', use_bias=False, kernel_initializer='he_normal',
+                  kernel_regularizer=regularizers.l2(weight_decay))(input)
+
+
+def BasicBlock(input, planes, dropout, stride=1):
+    #inplanes = input._keras_shape[3]
+    inplanes = input.shape[3]
+
+    out = BatchNormalization()(input)
+    out = Activation('relu')(out)
+    out = conv3x3(out, planes, stride)
+    out = BatchNormalization()(out)
+    out = Activation('relu')(out)
+    # out = Dropout(dropout)(out)
+    # npu_ops.dropout takes TF1-style keep_prob, so pass 1 - dropout to match
+    # the original Dropout(rate=dropout) behaviour.
+    out = Lambda(npu_ops.dropout, arguments={'keep_prob': 1 - dropout})(out)
+    out = conv3x3(out, planes)
+
+    if stride != 1 or inplanes != planes:
+        shortcut = conv1x1(input, planes, stride)
+    else:
+        # identity shortcut: add the block input, not the residual branch
+        shortcut = input
+
+    out = Add()([out, shortcut])
+
+    return out
+
+
+def WideResNet(depth, width, num_classes=10, dropout=0.3):
+    layer = (depth - 4) // 6
+
+    input = Input(shape=(32, 32, 3))
+
+    x = conv3x3(input, 16)
+    for _ in range(layer):
+        x = BasicBlock(x, 16*width, dropout)
+    x = BasicBlock(x, 32*width, dropout, 2)
+    for _ in range(layer-1):
+        x = BasicBlock(x, 32*width, dropout)
+    x = BasicBlock(x, 64*width, dropout, 2)
+    for _ in range(layer-1):
+        x = BasicBlock(x, 64*width, dropout)
+
+    x = BatchNormalization()(x)
+    x = Activation('relu')(x)
+    x = GlobalAveragePooling2D()(x)
+    output = Dense(num_classes, activation='softmax', kernel_regularizer=regularizers.l2(weight_decay))(x)
+
+    model = Model(input, output)
+    model.summary()
+
+    return model
-- 
Gitee

From 66dd0ed30a507a6864f8f1c51859f42a1f91b1fb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:11:17 +0000
Subject: [PATCH 31/54] =?UTF-8?q?autoencoder=5FID2495=5Ffor=5FTensorFlow2.?=
 =?UTF-8?q?X=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 .../LICENSE                              | 284 ++++++++++++++++
 .../README.md                            | 170 ++++++++++
 .../modelzoo_level.txt                   |   3 +
 .../requirements.txt                     |   4 +
 .../run_1p.sh                            |   2 +
 .../test/train_full_1p.sh                | 167 ++++++++++
 .../test/train_performance_1p.sh         | 200 +++++++++
 .../test/train_performance_1p_static.sh  | 184 +++++++++
 .../train.py                             | 312 ++++++++++++++++
 9 files changed, 1326 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/LICENSE
 create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/README.md
 create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/modelzoo_level.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/requirements.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/run_1p.sh
 create mode 100644 
TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/train.py diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..fe21e55de --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/README.md @@ -0,0 +1,170 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +
+<h2 id="基本信息.md">基本信息</h2>
+ +**发布者(Publisher):Huawei** + +**应用领域(Application Domain):Image Denoising** + +**版本(Version):1.1** + +**修改时间(Modified) :2021.10.01** + +**大小(Size)**_**:324KB** + +**框架(Framework):TensorFlow 2.6** + +**模型格式(Model Format):ckpt** + +**精度(Precision):FP32_TO_FP16** + +**处理器(Processor):昇腾910** + +**应用级别(Categories):Benchmark** + +**描述(Description):基于TensorFlow框架的卷积自动编码器训练代码** + +
+<h2 id="概述.md">概述</h2>
+ +- 基于MNIST 数据集,训练用于图像去噪的深度卷积自动编码器。 + +- 参考论文: + + skip + +- 参考实现: + + [https://github.com/keras-team/keras-io/blob/master/examples/vision/autoencoder.py](https://github.com/keras-team/keras-io/blob/master/examples/vision/autoencoder.py) + +- 适配昇腾 AI 处理器的实现: + + skip + +- 通过Git获取对应commit\_id的代码方法如下: + + ``` + git clone {repository_url} # 克隆仓库的代码 + cd {repository_name} # 切换到模型的代码仓目录 + git checkout {branch} # 切换到对应分支 + git reset --hard {commit_id} # 代码设置到对应的commit_id + cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换 + ``` + +## 默认配置 +- 网络结构 + +- 训练超参(单卡): + - Batch size: 128 + - Train epochs:50 + + +## 支持特性 + +| 特性列表 | 是否支持 | +| ---------- | -------- | +| 分布式训练 | 否 | +| 混合精度 | 否 | +| 数据并行 | 否 | + + +## 混合精度训练 + +昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。 + +## 开启混合精度 +相关代码示例。 + +``` +npu_device.global_options().precision_mode = args.precision_mode +``` + +
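+下面是一个更完整的开启方式示意(整理自本仓 train.py 中的 npu_config() 实现,mixlist 文件路径仅为示例假设):
+
+```
+import npu_device
+
+# 自动混合精度:按内置优化策略将部分 float32 算子降为 float16
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# 可选:通过 mixlist 文件指定需保持 float32 的算子(路径为示例)
+# npu_device.global_options().modify_mixlist = "ops_info.json"
+# 所有 global_options 须在 open() 之前设置
+npu_device.open().as_default()
+```
+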
+<h2 id="训练环境准备.md">训练环境准备</h2>
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt位于模型根目录。
+
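+安装完成后,可用如下脚本快速自检依赖是否就绪(示意):
+
+```
+# 环境自检(示意):确认 TensorFlow 与 npu_device 均可正常导入
+import tensorflow as tf
+import npu_device
+print(tf.__version__)  # 本模型基于 TensorFlow 2.6 验证
+```
+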
+<h2 id="快速上手.md">快速上手</h2>
+ +## 数据集准备 + +1. 数据集请用户自行获取。 + +## 模型训练 +- 单击“立即下载”,并选择合适的下载方式下载源码包。 +- 开始训练。 + + 1. 启动训练之前,首先要配置程序运行相关环境变量。 + + 环境变量配置信息参见: + + [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819) + + + 2. 单卡训练 + + 2.1 设置单卡训练参数(脚本位于autoencoder_ID2495_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。 + + + ``` + batch_size=128 + #训练step + train_epochs=50 + ``` + + 2.2 单卡训练指令(脚本位于autoencoder_ID2495_for_TensorFlow2.X/test/train_full_1p.sh) + + ``` + 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡 + bash train_full_1p.sh --data_path=xx + 数据集应有如下结构(数据切分可能不同) + | + ├─mnist.npz + + ``` + +
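+train.py 从 --data_path 指定的目录离线加载 mnist.npz,核心逻辑如下(示意,摘自本仓 train.py,路径为示例):
+
+```
+import os
+from tensorflow.keras.datasets import mnist
+
+# 训练脚本将 --data_path 透传为 train.py 的 --data_dir
+data_path = os.path.join("/path/to/dataset", "mnist.npz")
+(x_train, y_train), (x_test, y_test) = mnist.load_data(data_path)
+```
+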
+<h2 id="迁移学习指导.md">迁移学习指导</h2>
+ +- 数据集准备。 + + 1. 获取数据。 + 请参见“快速上手”中的数据集准备 + +- 模型训练 + + 请参考“快速上手”章节 + +
+<h2 id="高级参考.md">高级参考</h2>
+
+## 脚本和示例代码
+
+    ├── README.md                                //说明文档
+    ├── requirements.txt                         //依赖
+    ├── train.py                                 //主脚本
+    ├── LICENSE
+    ├── run_1p.sh
+    ├── test
+    |    |—— train_full_1p.sh                    //单卡全量训练脚本
+    |    |—— train_performance_1p.sh             //单卡性能训练脚本
+
+
+## 脚本参数
+
+```
+batch_size        训练batch_size
+train_epochs      总训练epoch数
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径,具体流程参见“模型训练”的示例。
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..725a45e30
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:NOK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..b8a04881d
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,3 @@
+numpy
+tensorflow
+matplotlib
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..d549010ea
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1,2 @@
+cur_path=`pwd`
+python3 ${cur_path}/train.py > loss+perf_gpu.txt 2>&1
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..cfcfa58c0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=128
+#网络名称,同目录名称
+Network="autoencoder_ID2495_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RankSize=1
+#训练epoch,可选
+train_epochs=50
+#训练step
+# train_steps=5
+#学习率
+# learning_rate=0.0001
+ckpt_path=""
+#参数配置
+data_path=""
+
+############维测参数##############
+precision_mode="allow_fp32_to_fp16"
+#维持参数,以下不需要修改
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=${cur_path}/test/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file=${cur_path}/configs/ops_info.json
+fusion_off_flag=False
+fusion_off_file=${cur_path}/configs/fusion_switch.cfg
+auto_tune=False
+############维测参数##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+############维测参数##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == 
--profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done +############维测参数############## + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=${e2e_time} +wait +FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep '469/469' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $6}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +grep 'val_loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..8f4609336 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,200 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 +Network="autoencoder_ID2495_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +#训练epoch,可选 +train_epochs=3 +#训练step +# train_steps=5 +#学习率 +# learning_rate=0.0001 +ckpt_path="" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=${cur_path}/test/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file=${cur_path}/configs/ops_info.json +fusion_off_flag=False +fusion_off_file=${cur_path}/configs/fusion_switch.cfg +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done +############维测参数############## + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + 
--profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#参数回改 +#sed -i "s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +#由于loss和性能取值不连续,所以每次只取每个Epoch的最后一个loss和性能值 +Step=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'` +Time=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +3 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'` +TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'` + +#输出FPS +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + + + +#输出训练精度,需要模型审视修改 +#train_accuracy=`grep 907/907 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $9}' | awk '{sum+=$1} END {print sum/NR}'` +#打印,不需要修改 +#echo "Final Train Accuracy : ${train_accuracy}" + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +###下面字段用于冒烟看护 +BatchSize=${batch_size} +#设备类型,自动获取 +DeviceType=`uname -m` +#用例名称,自动获取 +CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep 'val_loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +##获取错误信息 +#系统错误信息 +#ModelStatus="图执行FAIL" +#error_msg="EZ3002" +#判断错误信息是否和历史状态一致,此处无需修改 +#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel" +#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#DTS单号或者issue链接 +#DTS_Number="DTS2021090622224" + +#关键信息打印到CaseName.log中,此处无需修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo 
"ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log + + + + + + + + diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..16576049b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,184 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 +Network="autoencoder_ID2495_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +#训练epoch,可选 +train_epochs=5 +#训练step +# train_steps=5 +#学习率 +# learning_rate=0.0001 +ckpt_path="" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=${cur_path}/test/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file=${cur_path}/configs/ops_info.json +fusion_off_flag=False +fusion_off_file=${cur_path}/configs/fusion_switch.cfg +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done +############维测参数############## + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + 
--over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} \ + --static=1 > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#参数回改 +#sed -i "s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +#由于loss和性能取值不连续,所以每次只取每个Epoch的最后一个loss和性能值 +Step=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'` +Time=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +3 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'` +TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'` + +#输出FPS +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + + + +#输出训练精度,需要模型审视修改 +#train_accuracy=`grep 907/907 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $9}' | awk '{sum+=$1} END {print sum/NR}'` +#打印,不需要修改 +#echo "Final Train Accuracy : ${train_accuracy}" + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +###下面字段用于冒烟看护 +BatchSize=${batch_size} +#设备类型,自动获取 +DeviceType=`uname -m` +#用例名称,自动获取 +CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep 'val_loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + + +#关键信息打印到CaseName.log中,此处无需修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..7ac9adc40 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/autoencoder_ID2495_for_TensorFlow2.X/train.py @@ -0,0 +1,312 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Title: Convolutional autoencoder for image denoising +Author: [Santiago L. Valdarrama](https://twitter.com/svpino) +Date created: 2021/03/01 +Last modified: 2021/03/01 +Description: How to train a deep convolutional autoencoder for image denoising. +""" + +""" +## Introduction + +This example demonstrates how to implement a deep convolutional autoencoder +for image denoising, mapping noisy digits images from the MNIST dataset to +clean digits images. This implementation is based on an original blog post +titled [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html) +by [François Chollet](https://twitter.com/fchollet). 
+""" + +""" +## Setup +""" +import npu_device +print('npu_device loaded') + +import os +import ast +import time +import numpy as np +import tensorflow as tf +# import matplotlib.pyplot as plt +from tensorflow import keras +from tensorflow.keras import layers +from tensorflow.keras.datasets import mnist +from tensorflow.keras.models import Model +import argparse + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_dir', default='/user/MNIST', help="""directory to data""") + parser.add_argument('--lr', default=0.0001, type=float, help="""learning rate""") + parser.add_argument('--batch_size', default=128, type=int, help="""batch size for 1p""") + parser.add_argument('--epochs', default=10, type=int, help="""epochs""") + parser.add_argument('--log_steps', default=1, type=int, help="""log frequency""") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, help='autotune flag, default is False') + parser.add_argument('--static', default=0, type=int,help="""static shape""") + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + 
"training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() + +npu_config() + +data_path = os.path.join(args.data_dir, 'mnist.npz') +batch_size = args.batch_size +initial_learning_rate = args.lr +epochs = args.epochs + +def preprocess(array): + """ + Normalizes the supplied array and reshapes it into the appropriate format. + """ + + array = array.astype("float32") / 255.0 + array = np.reshape(array, (len(array), 28, 28, 1)) + return array + +class LossHistory(tf.keras.callbacks.Callback): + def __init__(self, bs): + super().__init__() + self.batch_size = bs + def on_batch_begin(self, batch, logs={}): + self.start = time.time() + def on_batch_end(self, batch, logs={}): + if batch % args.log_steps == 0: + loss = logs.get('loss') + dura = time.time() - self.start + if dura < 10: + self.epoch_perf.append(dura) + #print('step:%d ,loss: %f ,time:%f'%(batch, loss, dura), flush=True) + def on_epoch_begin(self, epoch, logs={}): + self.epoch_perf = [] + self.epochstart = time.time() + def on_epoch_end(self, epoch, logs={}): + duration = time.time() - self.epochstart + print('epoch_duration: ', duration) + if epoch != 0: + self.perf.append(np.mean(self.epoch_perf)) + def on_train_begin(self, logs={}): + print('params: ', self.params) + self.perf = [] + def on_train_end(self, logs={}): + print('imgs/s: %.2f'%(self.batch_size / np.mean(self.perf))) + +# def noise(array): +# """ +# Adds random noise to each image in the supplied array. +# """ + +# noise_factor = 0.4 +# noisy_array = array + noise_factor * np.random.normal( +# loc=0.0, scale=1.0, size=array.shape +# ) + +# return np.clip(noisy_array, 0.0, 1.0) + + +# def display(array1, array2): +# """ +# Displays ten random images from each one of the supplied arrays. +# """ + +# n = 10 + +# indices = np.random.randint(len(array1), size=n) +# images1 = array1[indices, :] +# images2 = array2[indices, :] + +# plt.figure(figsize=(20, 4)) +# for i, (image1, image2) in enumerate(zip(images1, images2)): +# ax = plt.subplot(2, n, i + 1) +# plt.imshow(image1.reshape(28, 28)) +# plt.gray() +# ax.get_xaxis().set_visible(False) +# ax.get_yaxis().set_visible(False) + +# ax = plt.subplot(2, n, i + 1 + n) +# plt.imshow(image2.reshape(28, 28)) +# plt.gray() +# ax.get_xaxis().set_visible(False) +# ax.get_yaxis().set_visible(False) + +# plt.show() + + +""" +## Prepare the data +""" + +# Since we only need images from the dataset to encode and decode, we +# won't use the labels. 
+#(x_train, _), (x_test, _) = mnist.load_data(data_path) +(x_train, y_train), (x_test, y_test) = mnist.load_data(data_path) +if args.static==1: + x_train, y_train = np.array(x_train[:59904], dtype='object'), y_train[:59904] + x_test=np.array(x_test[:9984], dtype='object') + +# Normalize and reshape the data +train_data = preprocess(x_train) +test_data = preprocess(x_test) + +# Create a copy of the data with added noise +# noisy_train_data = noise(train_data) +# noisy_test_data = noise(test_data) + +# Display the train data and a version of it with added noise +# display(train_data, noisy_train_data) + +""" +## Build the autoencoder + +We are going to use the Functional API to build our convolutional autoencoder. +""" + +input = layers.Input(shape=(28, 28, 1)) + +# Encoder +x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input) +x = layers.MaxPooling2D((2, 2), padding="same")(x) +x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x) +x = layers.MaxPooling2D((2, 2), padding="same")(x) + +# Decoder +x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x) +x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x) +x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x) + +# Autoencoder +autoencoder = Model(input, x) +autoencoder.compile(optimizer="adam", loss="binary_crossentropy") +# autoencoder.summary() + +""" +Now we can train our autoencoder using `train_data` as both our input data +and target. Notice we are setting up the validation data using the same +format. +""" + +autoencoder.fit( + x=train_data, + y=train_data, + epochs=epochs, + batch_size=batch_size, + shuffle=True, + validation_data=(test_data, test_data), + callbacks=LossHistory(batch_size), + verbose=2 +) + +tf.saved_model.save(autoencoder, "model_saved_model") +""" +Let's predict on our test dataset and display the original image together with +the prediction from our autoencoder. + +Notice how the predictions are pretty close to the original images, although +not quite the same. +""" + +# predictions = autoencoder.predict(test_data) +# display(test_data, predictions) + +""" +Now that we know that our autoencoder works, let's retrain it using the noisy +data as our input and the clean data as our target. We want our autoencoder to +learn how to denoise the images. +""" + +# autoencoder.fit( +# x=noisy_train_data, +# y=train_data, +# epochs=100, +# batch_size=128, +# shuffle=True, +# validation_data=(noisy_test_data, test_data), +# ) + +""" +Let's now predict on the noisy data and display the results of our autoencoder. + +Notice how the autoencoder does an amazing job at removing the noise from the +input images. 
+""" + +# predictions = autoencoder.predict(noisy_test_data) +# display(noisy_test_data, predictions) \ No newline at end of file -- Gitee From 496b277d5574f91f257b3744ad07ddd4a48ea44c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:14:30 +0000 Subject: [PATCH 32/54] =?UTF-8?q?bit=5FID2496=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../bit_ID2496_for_TensorFlow2.X/LICENSE | 284 ++++++++++++ .../bit_ID2496_for_TensorFlow2.X/README.md | 245 ++++++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 6 + .../bit_ID2496_for_TensorFlow2.X/run_1p.sh | 1 + .../test/train_full_1p.sh | 162 +++++++ .../test/train_performance_1p_dynamic_eval.sh | 128 +++++ .../test/train_performance_1p_static_eval.sh | 167 +++++++ .../bit_ID2496_for_TensorFlow2.X/train.py | 437 ++++++++++++++++++ 9 files changed, 1433 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/train.py diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..3d254e4f7
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/README.md
@@ -0,0 +1,245 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.4.8**
+
+**Size: 210KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code for the BigTransfer (BiT) image classification network, based on the TensorFlow framework**
+
+<h2 id="概述.md">Overview</h2>
+
+- BigTransfer (also known as **BiT**) is a transfer learning method for image classification.
+
+- Reference paper:
+
+    https://arxiv.org/abs/1912.11370
+
+- Reference implementation:
+
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/bit.py
+
+- Implementation adapted to the Ascend AI Processor:
+
+    skip
+
+- To obtain the code at a specific commit_id via Git:
+
+        git clone {repository_url}        # clone the repository
+        cd {repository_name}              # enter the repository directory of the model
+        git checkout {branch}             # switch to the corresponding branch
+        git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+        cd {code_path}                    # enter the model code path; skip this step if the repository contains only this model
+
+
+## Default configuration
+- Network structure
+    - class MyBiTModel(keras.Model):
+          def __init__(self, num_classes, module, **kwargs):
+              super().__init__(**kwargs)
+
+              self.num_classes = num_classes
+              self.head = keras.layers.Dense(num_classes, kernel_initializer="zeros")
+              self.bit_model = module
+
+          def call(self, images):
+              bit_embedding = self.bit_model(images)
+              return self.head(bit_embedding)
+
+- Training hyperparameters (single device):
+    - Batch size: 64
+    - Train epochs: 15
+
+
+## Supported features
+
+| Feature              | Supported |
+|----------------------|-----------|
+| Distributed training | No        |
+| Mixed precision      | No        |
+| Data parallelism     | No        |
+
+
+## Mixed precision training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following its built-in optimization policies, it automatically lowers selected float32 operators across the network to float16, improving system performance and reducing memory usage at a very small cost in accuracy.
+
+## Enabling mixed precision
+Pass --precision_mode='allow_mix_precision' to the launch script.
+
+```
+parameter explanation:
+    '--log_steps', default=1, type=int, help='log frequency'
+    '--data_dir', default="../bit_datasets/", help='directory to data'
+    '--batch_size', default=64, type=int, help='batch size for 1p'
+    '--epochs', default=30, type=int, help='train epochs'
+    '--eval_static', dest="eval_static", type=ast.literal_eval, help='use static-shape pipelines (drop_remainder), default is False'
+    '--precision_mode', default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision'
+    '--over_dump', dest='over_dump', type=ast.literal_eval, help='whether to enable overflow detection, default is False'
+    '--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False'
+    '--data_dump_step', default="10", help='data dump step, default is 10'
+    '--profiling', dest='profiling', type=ast.literal_eval, help='whether to enable profiling for performance debugging, default is False'
+    '--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data'
+    '--over_dump_path', default="/home/data", type=str, help='the path to save over dump data'
+    '--data_dump_path', default="/home/data", type=str, help='the path to save dump data'
+    '--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False'
+    '--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False'
+    '--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json'
+    '--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg'
+```
+
+Related code example:
+
+```
+npu_device.global_options().precision_mode=args.precision_mode
+```
+
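+A fuller sketch of how train.py in this repository enables mixed precision plus an
+optional operator mixlist on the NPU (a minimal sketch based on the npu_config()
+helper in train.py; `npu_device` ships with the Ascend CANN toolchain, and
+"ops_info.json" stands for a user-supplied mixlist file):
+
+```python
+import npu_device
+
+# Lower selected float32 ops to float16 on the Ascend NPU.
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# Optionally keep user-listed ops at full precision via a mixlist file.
+npu_device.global_options().modify_mixlist = "ops_info.json"
+# Apply the options and make the NPU the default device.
+npu_device.open().as_default()
+```
+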
+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+1.  For hardware environment and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909).
+2.  Install Docker on the host and log in to the [Ascend Hub](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) to obtain the container image.
+
+    The images supported by this model are listed in [Table 1](#zh-cn_topic_0000001074498056_table1519011227314).
+
+    **Table 1** Image list
+
+    | Image name            | Image version | Compatible CANN version |
+    |-----------------------|---------------|-------------------------|
+    | ascend-tensorflow-arm | 20.12         | 20.2                    |
+
+3.  Run the following command to install the dependencies.
+```
+pip install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Prepare the training and validation datasets yourself; the dataset used here is tf_flowers (see the loading sketch at the end of this section).
+
+2. Place the tf_flowers TFRecord files (TensorFlow Datasets layout) under the data directory.
+
+3. The pre-trained BiT model archive (bit_m-r50x1_1.tar.gz) is unpacked into the ttst directory as a TensorFlow SavedModel, as shown in the layout below.
+
+The dataset directory is laid out as follows:
+
+```
+data
+├── tf_flowers
+│   └── 3.0.1
+│       ├── dataset_info.json
+│       ├── features.json
+│       ├── label.labels.txt
+│       ├── tf_flowers-train.tfrecord-00000-of-00002
+│       └── tf_flowers-train.tfrecord-00001-of-00002
+└── ttst
+    ├── assets
+    ├── bit_m-r50x1_1.tar.gz
+    ├── saved_model.pb
+    └── variables
+        ├── variables.data-00000-of-00001
+        └── variables.index
+```
+
+## Model training
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required at runtime.
+
+       For the environment variable settings, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-device training
+
+       2.1 Single-device training command (the script is located at bit_ID2496_for_TensorFlow2.X/test/train_full_1p.sh). Make sure that "--data_path" in the example below is changed to the user's tfrecord path; here the data folder is placed under the home directory. The default is precision_mode='allow_mix_precision'.
+
+        bash train_full_1p.sh --data_path=/home/data
+
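+The dataset-loading step referenced above, as a minimal sketch (mirroring the
+tfds.load call in train.py; "/home/data" is the example data_path used in this
+README, and download=False requires the TFRecords to already be in place):
+
+```python
+import tensorflow_datasets as tfds
+
+# 85%/15% train/validation split, read from the local tf_flowers copy.
+train_ds, validation_ds = tfds.load(
+    "tf_flowers",
+    split=["train[:85%]", "train[85%:]"],
+    download=False,
+    data_dir="/home/data",
+    as_supervised=True,  # yields (image, label) pairs
+)
+```
+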
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+
+```
+bit_ID2496_for_TensorFlow2.X
+├── LICENSE
+├── README.md
+├── requirements.txt
+├── run_1p.sh
+├── train.py
+└── test                               # training script directory
+    ├── train_full_1p.sh
+    ├── train_performance_1p_dynamic_eval.sh
+    └── train_performance_1p_static_eval.sh
+```
+
+## Script parameters
+
+```
+batch_size=64
+# Training epochs, optional
+train_epochs=15
+
+############ monitoring/debug parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no need to modify the lines below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/configs/fusion_switch.cfg"
+```
+
+## Training process
+
+Start single-device or multi-device training with the commands given under "Model training"; single-device and 8-device training are run through different scripts. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training log and checkpoint files. Taking 8-device training as an example, the loss information is written to ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..801f37760
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:POK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..c365b5b3c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,6 @@
+os
+numpy
+pandas
+tensorflow
+tensorflow_hub
+tensorflow_datasets
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..ef6943627
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1 @@
+python3 train.py
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..bfc6e8cc1
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no need to modify
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Base parameters; review and modify per model
+# Batch Size
+batch_size=64
+# Network name, same as the directory name
+Network="bit_ID2496_for_TensorFlow2.X"
+# Number of devices; 1 by default for single-device training
+RANK_SIZE=1
+# Training epochs, optional
+train_epochs=15
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter settings
+data_path=""
+
+############ monitoring/debug parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no need to modify the lines below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/configs/fusion_switch.cfg"
+############ monitoring/debug parameters ##############
+
+############ monitoring/debug parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == 
--over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    fi
+done
+############ monitoring/debug parameters ##############
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path \" must be config"
+    exit 1
+fi
+
+############## run training ##########
+cd $cur_path
+
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+        --epochs=${train_epochs} \
+        --batch_size=${batch_size} \
+        --eval_static=True \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no need to modify
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+TrainingTime=`grep ,time: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1`
+wait
+FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}'`
+# Print; no need to modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+train_accuracy=`grep val_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'`
+# Print; no need to modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no need to modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data; no need to modify
+# Throughput
+ActualFPS=${FPS}
+# Time per training step
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '\- loss:' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no need to modify
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no need to modify
+echo "Network = ${Network}" > 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
new file mode 100644
index 000000000..1e6f873df
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no need to modify
+# export ASCEND_GLOBAL_LOG_LEVEL_ETP=3
+
+# Base parameters; review and modify per model
+# Batch Size
+batch_size=64
+# Network name, same as the directory name
+Network="bit_ID2496_for_TensorFlow2.X"
+# Number of devices; 1 by default for single-device training
+RANK_SIZE=1
+# Training epochs, optional
+#train_epochs=30
+train_epochs=1
+# Training steps
+# train_steps=5
+# Learning rate
+# learning_rate=0.0001
+ckpt_path=""
+# Parameter settings
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p_dynamic_eval.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path \" must be config"
+    exit 1
+fi
+
+############## run training ##########
+cd $cur_path
+
+# Parameter substitution (disabled)
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 train.py --data_dir=${data_path}\
+        --epochs=${train_epochs}\
+        --batch_size=${batch_size}\
+        --eval_static=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Revert parameter substitution (disabled)
+#sed -i "s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+# Print results; no need to modify
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+TrainingTime=`grep step: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |tail -n +2|awk -F ":" '{print $4}'|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# Print; no need to modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+train_accuracy=`grep val_accuracy 
$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'`
+# Print; no need to modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no need to modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'dynamic'_'perf'
+
+## Collect performance data; no need to modify
+# Throughput
+ActualFPS=${FPS}
+# Time per training step
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'`
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|tail -n +2|grep -v "10/10"|awk '{print $3}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no need to modify
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+#ModelStatus="graph execution FAIL"
+#DTS_Number="DTS2022012004581"
+#error_msg="Output shape is still unknown after shape inference"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+#error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l`
+
+# Print key information into ${CaseName}.log; no need to modify
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
new file mode 100644
index 000000000..98a7d0a04
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no need to modify
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Base parameters; review and modify per model
+# Batch Size
+batch_size=64
+# Network name, same as the directory name
+Network="bit_ID2496_for_TensorFlow2.X"
+# Number of devices; 1 by default for single-device training
+RANK_SIZE=1
+# Training epochs, optional
+train_epochs=5
+
+log_steps=1
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter settings
+data_path=""
+
+############ monitoring/debug parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no need to modify the lines below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # cur_path is the code root directory
+    mkdir -p 
${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/configs/fusion_switch.cfg"
+############ monitoring/debug parameters ##############
+
+############ monitoring/debug parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --log_steps* ]];then
+        log_steps=`echo ${para#*=}`
+    fi
+done
+############ monitoring/debug parameters ##############
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p_static_eval.sh"
+    exit 1
+fi
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path \" must be config"
+    exit 1
+fi
+
+############## run training ##########
+cd $cur_path
+
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+        --log_steps=$log_steps \
+        --epochs=${train_epochs} \
+        --batch_size=${batch_size} \
+        --eval_static=True \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no need to modify
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+TrainingTime=`grep ,time: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1`
+wait
+FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}'`
+# Print; no need to modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+train_accuracy=`grep val_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'`
+# Print; no need to modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no need to modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no need to modify
+# Throughput
+ActualFPS=${FPS}
+# Time per training step
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '\- loss:' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no need to modify
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no need to modify
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..837044fc6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/bit_ID2496_for_TensorFlow2.X/train.py
@@ -0,0 +1,437 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Image Classification using BigTransfer (BiT)
+Author: [Sayan Nath](https://twitter.com/sayannath2350)
+Date created: 2021/09/24
+Last modified: 2021/09/24
+Description: BigTransfer (BiT) State-of-the-art transfer learning for image classification.
+"""
+
+"""
+## Introduction
+
+BigTransfer (also known as BiT) is a state-of-the-art transfer learning method for image
+classification. 
Transfer of pre-trained representations improves sample efficiency and
+simplifies hyperparameter tuning when training deep neural networks for vision. BiT
+revisits the paradigm of pre-training on large supervised datasets and fine-tuning the
+model on a target task, and highlights the importance of appropriately choosing
+normalization layers and of scaling the architecture capacity as the amount of
+pre-training data increases.
+
+BigTransfer (BiT) models are pre-trained on public datasets and released, along with code, in
+[TF2, Jax and Pytorch](https://github.com/google-research/big_transfer). This helps anyone reach
+state-of-the-art performance on their task of interest, even with just a handful of
+labeled images per class.
+
+You can find BiT models pre-trained on
+[ImageNet](https://image-net.org/challenges/LSVRC/2012/index) and ImageNet-21k in
+[TFHub](https://tfhub.dev/google/collections/bit/1) as TensorFlow2 SavedModels that you
+can easily use as Keras Layers. There is a variety of sizes, ranging from a standard
+ResNet50 to a ResNet152x4 (152 layers deep, 4x wider than a typical ResNet50), for users
+with larger computational and memory budgets but higher accuracy requirements.
+
+![](https://i.imgur.com/XeWVfe7.jpeg)
+Figure: The x-axis shows the number of images used per class, ranging from 1 to the full
+dataset. On the plots on the left, the curve in blue above is the BiT-L model, whereas
+the curve below is a ResNet-50 pre-trained on ImageNet (ILSVRC-2012).
+"""
+
+"""
+## Setup
+"""
+import npu_device
+
+import os
+import ast
+import time
+import numpy as np
+import pandas as pd
+# import matplotlib.pyplot as plt
+import argparse
+import tensorflow as tf
+from tensorflow import keras
+import tensorflow_hub as hub
+import tensorflow_datasets as tfds
+
+tfds.disable_progress_bar()
+
+SEEDS = 42
+
+np.random.seed(SEEDS)
+tf.random.set_seed(SEEDS)
+
+def parse_args():
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--log_steps', default=1, type=int, help="""log frequency""")
+    parser.add_argument('--data_dir', default="../bit_datasets/", help="""directory to data""")
+    parser.add_argument('--batch_size', default=64, type=int, help="""batch size for 1p""")
+    parser.add_argument('--epochs', default=30, type=int, help="""epochs""")
+    parser.add_argument('--eval_static', dest="eval_static", type=ast.literal_eval, help='use static-shape pipelines (drop_remainder), default is False')
+    parser.add_argument('--precision_mode', default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision')
+    parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='whether to enable overflow detection, default is False')
+    parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False')
+    parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10')
+    parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, help='whether to enable profiling for performance debugging, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+    parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+    parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+    parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False')
+    parser.add_argument('--fusion_off_flag', 
dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False')
+    parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+    parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+    args, unknown_args = parser.parse_known_args()
+    if len(unknown_args) > 0:
+        for bad_arg in unknown_args:
+            print("ERROR: Unknown command line arg: %s" % bad_arg)
+        raise ValueError("Invalid command line arg(s)")
+    return args
+
+args = parse_args()
+
+def npu_config():
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        npu_device.global_options().profiling_config.profiling_options = profiling_options
+    npu_device.global_options().precision_mode=args.precision_mode
+    if args.use_mixlist and args.precision_mode=='allow_mix_precision':
+        npu_device.global_options().modify_mixlist=args.mixlist_file
+    if args.fusion_off_flag:
+        npu_device.global_options().fusion_switch_file=args.fusion_off_file
+    npu_device.open().as_default()
+npu_config()
+
+data_path = args.data_dir
+bit_model_url = os.path.join(data_path, 'ttst')
+num_epochs = args.epochs
+"""
+## Gather Flower Dataset
+"""
+
+train_ds, validation_ds = tfds.load(
+    "tf_flowers", split=["train[:85%]", "train[85%:]"], download=False,
+    data_dir=data_path, as_supervised=True,
+)
+
+"""
+## Visualise the dataset
+"""
+
+# plt.figure(figsize=(10, 10))
+# for i, (image, label) in enumerate(train_ds.take(9)):
+#     ax = plt.subplot(3, 3, i + 1)
+#     plt.imshow(image)
+#     plt.title(int(label))
+#     plt.axis("off")
+
+"""
+## Define hyperparameters
+"""
+
+RESIZE_TO = 384
+CROP_TO = 224
+BATCH_SIZE = args.batch_size
+STEPS_PER_EPOCH = 10
+AUTO = tf.data.AUTOTUNE  # optimise the pipeline performance
+NUM_CLASSES = 5  # number of classes
+SCHEDULE_LENGTH = (
+    500  # we will train on lower resolution images and will still attain good results
+)
+SCHEDULE_BOUNDARIES = [
+    200,
+    300,
+    400,
+]  # the larger the dataset, the longer the schedule
+
+"""
+The hyperparameters like `SCHEDULE_LENGTH` and `SCHEDULE_BOUNDARIES` are determined based
+on empirical results. The method has been explained in the [original
+paper](https://arxiv.org/abs/1912.11370) and in their [Google AI Blog
+Post](https://ai.googleblog.com/2020/05/open-sourcing-bit-exploring-large-scale.html).
+
+`SCHEDULE_LENGTH` also determines whether to use [MixUp
+Augmentation](https://arxiv.org/abs/1710.09412). You can also find an easy MixUp
+Implementation in [Keras Coding Examples](https://keras.io/examples/vision/mixup/).
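+
+For example, with the defaults above (`SCHEDULE_LENGTH = 500`, `BATCH_SIZE = 64`),
+the rescaling applied in the next cell, `SCHEDULE_LENGTH * 512 / BATCH_SIZE`,
+gives 500 * 512 / 64 = 4000 schedule steps.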
+ +![](https://i.imgur.com/oSaIBYZ.jpeg) +""" + +""" +## Define preprocessing helper functions +""" + +SCHEDULE_LENGTH = SCHEDULE_LENGTH * 512 / BATCH_SIZE + + +@tf.function +def preprocess_train(image, label): + image = tf.image.random_flip_left_right(image) + image = tf.image.resize(image, (RESIZE_TO, RESIZE_TO)) + image = tf.image.random_crop(image, (CROP_TO, CROP_TO, 3)) + image = image / 255.0 + return (image, label) + + +@tf.function +def preprocess_test(image, label): + image = tf.image.resize(image, (RESIZE_TO, RESIZE_TO)) + image = image / 255.0 + return (image, label) + + +DATASET_NUM_TRAIN_EXAMPLES = train_ds.cardinality().numpy() + +repeat_count = int( + SCHEDULE_LENGTH * BATCH_SIZE / DATASET_NUM_TRAIN_EXAMPLES * STEPS_PER_EPOCH +) +repeat_count += 50 + 1 # To ensure at least there are 50 epochs of training + +""" +## Define the data pipeline +""" +if args.eval_static: + # Training pipeline + pipeline_train = ( + train_ds.shuffle(10000) + .repeat(repeat_count) # Repeat dataset_size / num_steps + .map(preprocess_train, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) + + # Validation pipeline + pipeline_validation = ( + validation_ds.map(preprocess_test, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) +else: + # Training pipeline + pipeline_train = ( + train_ds.shuffle(10000) + .repeat(repeat_count) # Repeat dataset_size / num_steps + .map(preprocess_train, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + + # Validation pipeline + pipeline_validation = ( + validation_ds.map(preprocess_test, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + +class LossHistory(tf.keras.callbacks.Callback): + def __init__(self, bs): + super().__init__() + self.batch_size = bs + def on_batch_begin(self, batch, logs={}): + self.start = time.time() + def on_batch_end(self, batch, logs={}): + if batch % args.log_steps == 0: + loss = logs.get('loss') + dura = time.time() - self.start + if dura < 10: + self.epoch_perf.append(dura) + print('step:%d ,loss: %f ,time:%f'%(batch, loss, dura), flush=True) + def on_epoch_begin(self, epoch, logs={}): + self.epoch_perf = [] + self.epochstart = time.time() + def on_epoch_end(self, epoch, logs={}): + duration = time.time() - self.epochstart + print('epoch_duration: ', duration) + if epoch != 0: + self.perf.append(np.mean(self.epoch_perf)) + def on_train_begin(self, logs={}): + print('params: ', self.params) + self.perf = [] + def on_train_end(self, logs={}): + print('imgs/s: %.2f'%(self.batch_size / np.mean(self.perf))) + +logger = LossHistory(args.batch_size) +""" +## Visualise the training samples +""" + +# image_batch, label_batch = next(iter(pipeline_train)) + +# plt.figure(figsize=(10, 10)) +# for n in range(25): +# ax = plt.subplot(5, 5, n + 1) +# plt.imshow(image_batch[n]) +# plt.title(label_batch[n].numpy()) +# plt.axis("off") + +""" +## Load pretrained TF-Hub model into a `KerasLayer` +""" + +# bit_model_url = "https://tfhub.dev/google/bit/m-r50x1/1" +bit_module = hub.KerasLayer(bit_model_url) + +""" +## Create BigTransfer (BiT) model + +To create the new model, we: + +1. Cut off the BiT model’s original head. This leaves us with the “pre-logits” output. +We do not have to do this if we use the ‘feature extractor’ models (i.e. all those in +subdirectories titled `feature_vectors`), since for those models the head has already +been cut off. + +2. Add a new head with the number of outputs equal to the number of classes of our new +task. 
Note that it is important that we initialise the head to all zeroes. +""" + + +class MyBiTModel(keras.Model): + def __init__(self, num_classes, module, **kwargs): + super().__init__(**kwargs) + + self.num_classes = num_classes + self.head = keras.layers.Dense(num_classes, kernel_initializer="zeros") + self.bit_model = module + + def call(self, images): + bit_embedding = self.bit_model(images) + return self.head(bit_embedding) + + +model = MyBiTModel(num_classes=NUM_CLASSES, module=bit_module) + +""" +## Define optimizer and loss +""" + +learning_rate = 0.003 * BATCH_SIZE / 512 + +# Decay learning rate by a factor of 10 at SCHEDULE_BOUNDARIES. +lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay( + boundaries=SCHEDULE_BOUNDARIES, + values=[ + learning_rate, + learning_rate * 0.1, + learning_rate * 0.01, + learning_rate * 0.001, + ], +) +optimizer = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9) + +loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) + +""" +## Compile the model +""" + +model.compile(optimizer=optimizer, loss=loss_fn, metrics=["accuracy"]) + +""" +## Set up callbacks +""" + +train_callbacks = [ + keras.callbacks.EarlyStopping( + monitor="val_accuracy", patience=2, restore_best_weights=True + ), + logger +] + +""" +## Train the model +""" + +history = model.fit( + pipeline_train, + batch_size=BATCH_SIZE, + epochs=num_epochs, + steps_per_epoch=STEPS_PER_EPOCH, + validation_data=pipeline_validation, + callbacks=train_callbacks, + verbose=2 +) + +""" +## Plot the training and validation metrics +""" + + +# def plot_hist(hist): +# plt.plot(hist.history["accuracy"]) +# plt.plot(hist.history["val_accuracy"]) +# plt.plot(hist.history["loss"]) +# plt.plot(hist.history["val_loss"]) +# plt.title("Training Progress") +# plt.ylabel("Accuracy/Loss") +# plt.xlabel("Epochs") +# plt.legend(["train_acc", "val_acc", "train_loss", "val_loss"], loc="upper left") +# plt.show() + + +# plot_hist(history) + +""" +## Evaluate the model +""" + +# accuracy = model.evaluate(pipeline_validation)[1] * 100 +# print("Accuracy: {:.2f}%".format(accuracy)) + +""" +## Conclusion + +BiT performs well across a surprisingly wide range of data regimes +-- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on +ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark +(VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class, +and 97.0% on CIFAR-10 with 10 examples per class. + +![](https://i.imgur.com/b1Lw5fz.png) + +You can experiment further with the BigTransfer Method by following the +[original paper](https://arxiv.org/abs/1912.11370). 
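+
+As a final check, a minimal sketch for scoring the fine-tuned model on the
+validation pipeline defined above (equivalent to the commented-out snippet in
+the "Evaluate the model" cell):
+
+```python
+loss, accuracy = model.evaluate(pipeline_validation, verbose=0)
+print("Validation accuracy: {:.2f}%".format(accuracy * 100))
+```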
+""" \ No newline at end of file -- Gitee From 9d02d8ea5cb9cb34d4ce08927a81cfae47c3f125 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:14:52 +0000 Subject: [PATCH 33/54] =?UTF-8?q?consistency=5Ftraining=5FID2499=5Ffor=5FT?= =?UTF-8?q?ensorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 284 +++++ .../My_SWA.py | 152 +++ .../README.md | 183 ++++ .../augment.py | 985 ++++++++++++++++++ .../modelzoo_level.txt | 0 .../requirements.txt | 5 + .../test/train_full_1p.sh | 162 +++ .../test/train_performance_1p_dynamic_eval.sh | 128 +++ .../test/train_performance_1p_static_eval.sh | 162 +++ .../train.py | 598 +++++++++++ 10 files changed, 2659 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/My_SWA.py create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/augment.py create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/train.py diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/My_SWA.py b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/My_SWA.py
new file mode 100644
index 000000000..d448d5e08
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/My_SWA.py
@@ -0,0 +1,152 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""An implementation of the Stochastic Weight Averaging optimizer.
+
+The Stochastic Weight Averaging mechanism was proposed by Pavel Izmailov
+et al. in the paper [Averaging Weights Leads to Wider Optima and Better
+Generalization](https://arxiv.org/abs/1803.05407). The optimizer
+implements averaging of multiple points along the trajectory of SGD.
+This averaging has been shown to improve model performance on validation/test
+sets whilst possibly causing a small increase in loss on the training
+set.
+"""
+
+import tensorflow as tf
+from tensorflow_addons.optimizers.average_wrapper import AveragedOptimizerWrapper
+from tensorflow_addons.utils import types
+from tensorflow.python.keras import backend
+from typeguard import typechecked
+
+
+@tf.keras.utils.register_keras_serializable(package="Addons")
+class My_SWA(AveragedOptimizerWrapper):
+    """This class extends optimizers with Stochastic Weight Averaging (SWA).
+
+    The Stochastic Weight Averaging mechanism was proposed by Pavel Izmailov
+    et al. in the paper [Averaging Weights Leads to Wider Optima and
+    Better Generalization](https://arxiv.org/abs/1803.05407). The optimizer
+    implements averaging of multiple points along the trajectory of SGD. The
+    optimizer expects an inner optimizer which will be used to apply the
+    gradients to the variables and itself computes a running average of the
+    variables every `k` steps (which generally corresponds to the end
+    of a cycle when a cyclic learning rate is employed).
+
+    We also allow the specification of the number of steps averaging
+    should first happen after. Let's say we want averaging to happen every `k`
+    steps after the first `m` steps.
After step `m` we'd take a snapshot of the
+    variables and then average the weights appropriately at step `m + k`,
+    `m + 2k` and so on. The assign_average_vars function can be called at the
+    end of training to obtain the averaged_weights from the optimizer.
+
+    Note: If your model has batch-normalization layers you would need to run
+    the final weights through the data to compute the running mean and
+    variance corresponding to the activations for each layer of the network.
+    From the paper: If the DNN uses batch normalization we run one
+    additional pass over the data, to compute the running mean and standard
+    deviation of the activations for each layer of the network with SWA
+    weights after the training is finished, since these statistics are not
+    collected during training. For most deep learning libraries, such as
+    PyTorch or Tensorflow, one can typically collect these statistics by
+    making a forward pass over the data in training mode
+    ([Averaging Weights Leads to Wider Optima and Better
+    Generalization](https://arxiv.org/abs/1803.05407)).
+
+    Example of usage:
+
+    ```python
+    opt = tf.keras.optimizers.SGD(learning_rate)
+    opt = My_SWA(opt, start_averaging=m, average_period=k)
+    ```
+    """
+
+    @typechecked
+    def __init__(
+        self,
+        optimizer: types.Optimizer,
+        start_averaging: int = 0,
+        average_period: int = 10,
+        name: str = "SWA",
+        **kwargs,
+    ):
+        r"""Wrap optimizer with the Stochastic Weight Averaging mechanism.
+
+        Args:
+            optimizer: The original optimizer that will be used to compute and
+                apply the gradients.
+            start_averaging: An integer. Threshold to start averaging using
+                SWA. Averaging only occurs at `start_averaging` iters, must
+                be >= 0. If start_averaging = m, the first snapshot will be
+                taken after the mth application of gradients (where the first
+                iteration is iteration 0).
+            average_period: An integer. The synchronization period of SWA. The
+                averaging occurs every average_period steps. Averaging period
+                needs to be >= 1.
+            name: Optional name for the operations created when applying
+                gradients. Defaults to 'SWA'.
+            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
+                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by
+                norm; `clipvalue` is clip gradients by value, `decay` is
+                included for backward compatibility to allow time inverse
+                decay of learning rate. `lr` is included for backward
+                compatibility, recommended to use `learning_rate` instead.
+        """
+        super().__init__(optimizer, name, **kwargs)
+        # Track the iteration count explicitly so that average_op below can
+        # compare against it with int32 arithmetic.
+        self.iterations = backend.variable(0, dtype='int32', name='iterations')
+
+        if average_period < 1:
+            raise ValueError("average_period must be >= 1")
+        if start_averaging < 0:
+            raise ValueError("start_averaging must be >= 0")
+
+        self._set_hyper("average_period", average_period)
+        self._set_hyper("start_averaging", start_averaging)
+
+    @tf.function
+    def average_op(self, var, average_var, local_apply_state):
+        # The upstream tensorflow_addons implementation reads these hyper
+        # parameters as int64; this port uses int32 instead.
+        #average_period = self._get_hyper("average_period", tf.dtypes.int64)
+        #start_averaging = self._get_hyper("start_averaging", tf.dtypes.int64)
+        average_period = self._get_hyper("average_period", tf.dtypes.int32)
+        start_averaging = self._get_hyper("start_averaging", tf.dtypes.int32)
+        # number of times snapshots of weights have been taken (using max to
+        # avoid negative values of num_snapshots).
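+        # Worked example (illustrative): with start_averaging=10 and
+        # average_period=5, at iteration 23 num_snapshots = max(0, (23 - 10) // 5)
+        # = 2 and checkpoint = 10 + 2 * 5 = 20 != 23, so no averaging happens;
+        # the next average update fires at iteration 25.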
+        '''
+        num_snapshots = tf.math.maximum(
+            tf.cast(0, tf.int64),
+            tf.math.floordiv(self.iterations - start_averaging, average_period),
+        )
+        '''
+        num_snapshots = tf.math.maximum(
+            tf.cast(0, tf.int32),
+            tf.math.floordiv(self.iterations - start_averaging, average_period),
+        )
+
+        # The average update should happen iff two conditions are met:
+        # 1. A min number of iterations (start_averaging) have taken place.
+        # 2. Iteration is one in which snapshot should be taken.
+        checkpoint = start_averaging + num_snapshots * average_period
+        if self.iterations >= start_averaging and self.iterations == checkpoint:
+            num_snapshots = tf.cast(num_snapshots, tf.float32)
+            average_value = (average_var * num_snapshots + var) / (num_snapshots + 1.0)
+            return average_var.assign(average_value, use_locking=self._use_locking)
+
+        return average_var
+
+    def get_config(self):
+        config = {
+            "average_period": self._serialize_hyperparameter("average_period"),
+            "start_averaging": self._serialize_hyperparameter("start_averaging"),
+        }
+        base_config = super().get_config()
+        return {**base_config, **config}
diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..5bf35a04b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/README.md
@@ -0,0 +1,183 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.04.08**
+
+**Size: 365M**
+
+**Framework: TensorFlow_2.4.1**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: A simple and computationally efficient algorithm for training deep neural networks under the semi-supervised learning paradigm**
+
+
+<h2 id="概述.md">Overview</h2>
+
+A simple and computationally efficient algorithm for training deep neural networks under the semi-supervised learning paradigm.
+
+- Reference paper:
+
+    [Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848)
+
+- Reference implementation:
+
+    [https://github.com/keras-team/keras-io/blob/master/examples/vision/consistency_training.py](https://github.com/keras-team/keras-io/blob/master/examples/vision/consistency_training.py)
+
+- Implementation adapted for the Ascend AI Processor:
+
+    [https://gitee.com/jelly_111/research_tf2/tree/master/consistency_training_ID2499_for_TensorFlow2.X](https://gitee.com/jelly_111/research_tf2/tree/master/consistency_training_ID2499_for_TensorFlow2.X)
+
+- To obtain the code at a specific commit_id via Git:
+    ```
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's code directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # change to the model code path; not needed if the repo contains only this model
+    ```
+
+## Default configuration
+
+- Main training hyperparameters (single card):
+    - batch_size: 128
+    - epochs: 5
+    - lr: 0.001
+
+## Supported features
+
+| Feature | Supported |
+| ----------| --------|
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+## Mixed precision training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators to float16, which improves system performance and reduces memory usage with very little loss of precision.
+
+## Enabling mixed precision
+
+
+```
+    npu_device.global_options().precision_mode='allow_mix_precision'
+    npu_device.open().as_default()
+```
+
+
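+In context, this switch sits at the top of train.py before the model and the input pipelines are built — a minimal sketch (only the two option lines above are taken from this repository; the import placement and comments are illustrative):
+
+```python
+import npu_device
+
+# Lower eligible float32 operators to float16 according to the built-in policy.
+npu_device.global_options().precision_mode = 'allow_mix_precision'
+# Activate the NPU for the rest of the TensorFlow program.
+npu_device.open().as_default()
+```
+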
+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware environment and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the root directory of the model.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Download the cifar10 training dataset yourself; it should have the following structure:
+    ```
+    cifar10/
+    ├── batches.meta
+    ├── data_batch_1
+    ├── data_batch_2
+    └── ...
+    ```
+
+## Model training
+
+- Click "Download Now" and choose a suitable way to download the source package.
+- Start training
+
+    1. Before launching training, configure the environment variables related to running the program.
+
+       For the environment variable configuration, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-card training
+
+        2.1 Configure `data_path` in the train_full_1p.sh script (script path: consistency_training_ID2499_for_TensorFlow2.X/test/train_full_1p.sh) according to your actual path; the dataset parameter looks like this:
+
+            --data_path=/home/cifar10
+
+        2.2 The 1p command is:
+
+            bash train_full_1p.sh --data_path=/home/cifar10
+
+
+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+    1. Obtain the data.
+       See "Dataset preparation" under "Quick Start".
+
+- Model training.
+
+    Follow the training steps under "Model training".
+
+- Model evaluation.
+
+    Follow the validation steps under "Model training".
+
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+
+```
+consistency_training_ID2499_for_TensorFlow2.X/
+├── LICENSE
+├── README.md
+├── requirements.txt
+├── test
+│   ├── train_full_1p.sh
+│   ├── train_performance_1p_static_eval.sh
+│   ├── train_performance_1p_dynamic_eval.sh
+├── train.py
+├── My_SWA.py
+└── augment.py
+
+```
+
+## Script parameters
+
+```
+--data_dir         path to the training dataset
+--epochs           number of training epochs
+--batch_size       training batch size
+```
+
+## Training process
+
+1. Start single-card training with the training command given under "Model training".
+2. Set data_path in the training script (train_full_1p.sh) to the path of the training dataset. See the example under "Model training" for the exact procedure.
+3. The model is stored under "curpath/output/ASCEND_DEVICE_ID", including the training log files.
+4. The loss information is written to the file curpath/output/{ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
+
+## Inference/validation process
+
+```
+ NA
+
+```
diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/augment.py b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/augment.py
new file mode 100644
index 000000000..8b40ea727
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/augment.py
@@ -0,0 +1,985 @@
+# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""AutoAugment and RandAugment policies for enhanced image preprocessing.
+
+AutoAugment Reference: https://arxiv.org/abs/1805.09501
+RandAugment Reference: https://arxiv.org/abs/1909.13719
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import math
+from typing import Any, Dict, List, Optional, Text, Tuple
+
+from keras.layers.preprocessing import image_preprocessing as image_ops
+import tensorflow as tf
+
+
+# This signifies the max integer that the controller RNN could predict for the
+# augmentation scheme.
+_MAX_LEVEL = 10.
+
+
+def to_4d(image: tf.Tensor) -> tf.Tensor:
+  """Converts an input Tensor to 4 dimensions.
+
+  4D image => [N, H, W, C] or [N, C, H, W]
+  3D image => [1, H, W, C] or [1, C, H, W]
+  2D image => [1, H, W, 1]
+
+  Args:
+    image: The 2/3/4D input tensor.
+
+  Returns:
+    A 4D image tensor.
+
+  Raises:
+    `TypeError` if `image` is not a 2/3/4D tensor.
+
+  """
+  shape = tf.shape(image)
+  original_rank = tf.rank(image)
+  left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)
+  right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)
+  new_shape = tf.concat(
+      [
+          tf.ones(shape=left_pad, dtype=tf.int32),
+          shape,
+          tf.ones(shape=right_pad, dtype=tf.int32),
+      ],
+      axis=0,
+  )
+  return tf.reshape(image, new_shape)
+
+
+def from_4d(image: tf.Tensor, ndims: tf.Tensor) -> tf.Tensor:
+  """Converts a 4D image back to `ndims` rank."""
+  shape = tf.shape(image)
+  begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32)
+  end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32)
+  new_shape = shape[begin:end]
+  return tf.reshape(image, new_shape)
+
+
+def _convert_translation_to_transform(translations: tf.Tensor) -> tf.Tensor:
+  """Converts translations to a projective transform.
+ + The translation matrix looks like this: + [[1 0 -dx] + [0 1 -dy] + [0 0 1]] + + Args: + translations: The 2-element list representing [dx, dy], or a matrix of + 2-element lists representing [dx dy] to translate for each image. The + shape must be static. + + Returns: + The transformation matrix of shape (num_images, 8). + + Raises: + `TypeError` if + - the shape of `translations` is not known or + - the shape of `translations` is not rank 1 or 2. + + """ + translations = tf.convert_to_tensor(translations, dtype=tf.float32) + if translations.get_shape().ndims is None: + raise TypeError('translations rank must be statically known') + elif len(translations.get_shape()) == 1: + translations = translations[None] + elif len(translations.get_shape()) != 2: + raise TypeError('translations should have rank 1 or 2.') + num_translations = tf.shape(translations)[0] + + return tf.concat( + values=[ + tf.ones((num_translations, 1), tf.dtypes.float32), + tf.zeros((num_translations, 1), tf.dtypes.float32), + -translations[:, 0, None], + tf.zeros((num_translations, 1), tf.dtypes.float32), + tf.ones((num_translations, 1), tf.dtypes.float32), + -translations[:, 1, None], + tf.zeros((num_translations, 2), tf.dtypes.float32), + ], + axis=1, + ) + + +def _convert_angles_to_transform(angles: tf.Tensor, image_width: tf.Tensor, + image_height: tf.Tensor) -> tf.Tensor: + """Converts an angle or angles to a projective transform. + + Args: + angles: A scalar to rotate all images, or a vector to rotate a batch of + images. This must be a scalar. + image_width: The width of the image(s) to be transformed. + image_height: The height of the image(s) to be transformed. + + Returns: + A tensor of shape (num_images, 8). + + Raises: + `TypeError` if `angles` is not rank 0 or 1. + + """ + angles = tf.convert_to_tensor(angles, dtype=tf.float32) + if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test + angles = angles[None] + elif len(angles.get_shape()) != 1: + raise TypeError('Angles should have a rank 0 or 1.') + x_offset = ((image_width - 1) - + (tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) * + (image_height - 1))) / 2.0 + y_offset = ((image_height - 1) - + (tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) * + (image_height - 1))) / 2.0 + num_angles = tf.shape(angles)[0] + return tf.concat( + values=[ + tf.math.cos(angles)[:, None], + -tf.math.sin(angles)[:, None], + x_offset[:, None], + tf.math.sin(angles)[:, None], + tf.math.cos(angles)[:, None], + y_offset[:, None], + tf.zeros((num_angles, 2), tf.dtypes.float32), + ], + axis=1, + ) + + +def transform(image: tf.Tensor, transforms) -> tf.Tensor: + """Prepares input data for `image_ops.transform`.""" + original_ndims = tf.rank(image) + transforms = tf.convert_to_tensor(transforms, dtype=tf.float32) + if transforms.shape.rank == 1: + transforms = transforms[None] + image = to_4d(image) + image = image_ops.transform( + images=image, transforms=transforms, interpolation='nearest') + return from_4d(image, original_ndims) + + +def translate(image: tf.Tensor, translations) -> tf.Tensor: + """Translates image(s) by provided vectors. + + Args: + image: An image Tensor of type uint8. + translations: A vector or matrix representing [dx dy]. + + Returns: + The translated version of the image. 
+ + """ + transforms = _convert_translation_to_transform(translations) + return transform(image, transforms=transforms) + + +def rotate(image: tf.Tensor, degrees: float) -> tf.Tensor: + """Rotates the image by degrees either clockwise or counterclockwise. + + Args: + image: An image Tensor of type uint8. + degrees: Float, a scalar angle in degrees to rotate all images by. If + degrees is positive the image will be rotated clockwise otherwise it will + be rotated counterclockwise. + + Returns: + The rotated version of image. + + """ + # Convert from degrees to radians. + degrees_to_radians = math.pi / 180.0 + radians = tf.cast(degrees * degrees_to_radians, tf.float32) + + original_ndims = tf.rank(image) + image = to_4d(image) + + image_height = tf.cast(tf.shape(image)[1], tf.float32) + image_width = tf.cast(tf.shape(image)[2], tf.float32) + transforms = _convert_angles_to_transform( + angles=radians, image_width=image_width, image_height=image_height) + # In practice, we should randomize the rotation degrees by flipping + # it negatively half the time, but that's done on 'degrees' outside + # of the function. + image = transform(image, transforms=transforms) + return from_4d(image, original_ndims) + + +def blend(image1: tf.Tensor, image2: tf.Tensor, factor: float) -> tf.Tensor: + """Blend image1 and image2 using 'factor'. + + Factor can be above 0.0. A value of 0.0 means only image1 is used. + A value of 1.0 means only image2 is used. A value between 0.0 and + 1.0 means we linearly interpolate the pixel values between the two + images. A value greater than 1.0 "extrapolates" the difference + between the two pixel values, and we clip the results to values + between 0 and 255. + + Args: + image1: An image Tensor of type uint8. + image2: An image Tensor of type uint8. + factor: A floating point value above 0.0. + + Returns: + A blended image Tensor of type uint8. + """ + if factor == 0.0: + return tf.convert_to_tensor(image1) + if factor == 1.0: + return tf.convert_to_tensor(image2) + + image1 = tf.cast(image1, tf.float32) + image2 = tf.cast(image2, tf.float32) + + difference = image2 - image1 + scaled = factor * difference + + # Do addition in float. + temp = tf.cast(image1, tf.float32) + scaled + + # Interpolate + if factor > 0.0 and factor < 1.0: + # Interpolation means we always stay within 0 and 255. + return tf.cast(temp, tf.uint8) + + # Extrapolate: + # + # We need to clip and then cast. + return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8) + + +def cutout(image: tf.Tensor, pad_size: int, replace: int = 0) -> tf.Tensor: + """Apply cutout (https://arxiv.org/abs/1708.04552) to image. + + This operation applies a (2*pad_size x 2*pad_size) mask of zeros to + a random location within `img`. The pixel values filled in will be of the + value `replace`. The located where the mask will be applied is randomly + chosen uniformly over the whole image. + + Args: + image: An image Tensor of type uint8. + pad_size: Specifies how big the zero mask that will be generated is that is + applied to the image. The mask will be of size (2*pad_size x 2*pad_size). + replace: What pixel value to fill in the image in the area that has the + cutout mask applied to it. + + Returns: + An image Tensor that is of type uint8. + """ + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + # Sample the center location in the image where the zero mask will be applied. 
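+  # Illustrative example: for a 32x32 image with pad_size=8, the mask below is
+  # at most 16x16 pixels, and it shrinks wherever it would cross the image
+  # border.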
+ cutout_center_height = tf.random.uniform( + shape=[], minval=0, maxval=image_height, dtype=tf.int32) + + cutout_center_width = tf.random.uniform( + shape=[], minval=0, maxval=image_width, dtype=tf.int32) + + lower_pad = tf.maximum(0, cutout_center_height - pad_size) + upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size) + left_pad = tf.maximum(0, cutout_center_width - pad_size) + right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size) + + cutout_shape = [ + image_height - (lower_pad + upper_pad), + image_width - (left_pad + right_pad) + ] + padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]] + mask = tf.pad( + tf.zeros(cutout_shape, dtype=image.dtype), + padding_dims, + constant_values=1) + mask = tf.expand_dims(mask, -1) + mask = tf.tile(mask, [1, 1, 3]) + image = tf.where( + tf.equal(mask, 0), + tf.ones_like(image, dtype=image.dtype) * replace, image) + return image + + +def solarize(image: tf.Tensor, threshold: int = 128) -> tf.Tensor: + # For each pixel in the image, select the pixel + # if the value is less than the threshold. + # Otherwise, subtract 255 from the pixel. + return tf.where(image < threshold, image, 255 - image) + + +def solarize_add(image: tf.Tensor, + addition: int = 0, + threshold: int = 128) -> tf.Tensor: + # For each pixel in the image less than threshold + # we add 'addition' amount to it and then clip the + # pixel value to be between 0 and 255. The value + # of 'addition' is between -128 and 128. + added_image = tf.cast(image, tf.int64) + addition + added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8) + return tf.where(image < threshold, added_image, image) + + +def color(image: tf.Tensor, factor: float) -> tf.Tensor: + """Equivalent of PIL Color.""" + degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) + return blend(degenerate, image, factor) + + +def contrast(image: tf.Tensor, factor: float) -> tf.Tensor: + """Equivalent of PIL Contrast.""" + degenerate = tf.image.rgb_to_grayscale(image) + # Cast before calling tf.histogram. + degenerate = tf.cast(degenerate, tf.int32) + + # Compute the grayscale histogram, then compute the mean pixel value, + # and create a constant image size of that value. Use that as the + # blending degenerate target of the original image. 
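+  # Note: reduce_sum(hist) counts every pixel exactly once, so `mean` below
+  # equals num_pixels / 256 rather than a true average gray level.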
+ hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256) + mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0 + degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean + degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) + degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8)) + return blend(degenerate, image, factor) + + +def brightness(image: tf.Tensor, factor: float) -> tf.Tensor: + """Equivalent of PIL Brightness.""" + degenerate = tf.zeros_like(image) + return blend(degenerate, image, factor) + + +def posterize(image: tf.Tensor, bits: int) -> tf.Tensor: + """Equivalent of PIL Posterize.""" + shift = 8 - bits + return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) + + +def wrapped_rotate(image: tf.Tensor, degrees: float, replace: int) -> tf.Tensor: + """Applies rotation with wrap/unwrap.""" + image = rotate(wrap(image), degrees=degrees) + return unwrap(image, replace) + + +def translate_x(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor: + """Equivalent of PIL Translate in X dimension.""" + image = translate(wrap(image), [-pixels, 0]) + return unwrap(image, replace) + + +def translate_y(image: tf.Tensor, pixels: int, replace: int) -> tf.Tensor: + """Equivalent of PIL Translate in Y dimension.""" + image = translate(wrap(image), [0, -pixels]) + return unwrap(image, replace) + + +def shear_x(image: tf.Tensor, level: float, replace: int) -> tf.Tensor: + """Equivalent of PIL Shearing in X dimension.""" + # Shear parallel to x axis is a projective transform + # with a matrix form of: + # [1 level + # 0 1]. + image = transform( + image=wrap(image), transforms=[1., level, 0., 0., 1., 0., 0., 0.]) + return unwrap(image, replace) + + +def shear_y(image: tf.Tensor, level: float, replace: int) -> tf.Tensor: + """Equivalent of PIL Shearing in Y dimension.""" + # Shear parallel to y axis is a projective transform + # with a matrix form of: + # [1 0 + # level 1]. + image = transform( + image=wrap(image), transforms=[1., 0., 0., level, 1., 0., 0., 0.]) + return unwrap(image, replace) + + +def autocontrast(image: tf.Tensor) -> tf.Tensor: + """Implements Autocontrast function from PIL using TF ops. + + Args: + image: A 3D uint8 tensor. + + Returns: + The image after it has had autocontrast applied to it and will be of type + uint8. + """ + + def scale_channel(image: tf.Tensor) -> tf.Tensor: + """Scale the 2D image using the autocontrast rule.""" + # A possibly cheaper version can be done using cumsum/unique_with_counts + # over the histogram values, rather than iterating over the entire image. + # to compute mins and maxes. + lo = tf.cast(tf.reduce_min(image), tf.float32) + hi = tf.cast(tf.reduce_max(image), tf.float32) + + # Scale the image, making the lowest value 0 and the highest value 255. + def scale_values(im): + scale = 255.0 / (hi - lo) + offset = -lo * scale + im = tf.cast(im, tf.float32) * scale + offset + im = tf.clip_by_value(im, 0.0, 255.0) + return tf.cast(im, tf.uint8) + + result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image) + return result + + # Assumes RGB for now. Scales each channel independently + # and then stacks the result. 
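+  # Illustrative example: a channel whose values span [50, 200] is remapped
+  # linearly so that 50 -> 0 and 200 -> 255.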
+ s1 = scale_channel(image[:, :, 0]) + s2 = scale_channel(image[:, :, 1]) + s3 = scale_channel(image[:, :, 2]) + image = tf.stack([s1, s2, s3], 2) + return image + + +def sharpness(image: tf.Tensor, factor: float) -> tf.Tensor: + """Implements Sharpness function from PIL using TF ops.""" + orig_image = image + image = tf.cast(image, tf.float32) + # Make image 4D for conv operation. + image = tf.expand_dims(image, 0) + # SMOOTH PIL Kernel. + kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]], + dtype=tf.float32, + shape=[3, 3, 1, 1]) / 13. + # Tile across channel dimension. + kernel = tf.tile(kernel, [1, 1, 3, 1]) + strides = [1, 1, 1, 1] + degenerate = tf.nn.depthwise_conv2d( + image, kernel, strides, padding='VALID', dilations=[1, 1]) + degenerate = tf.clip_by_value(degenerate, 0.0, 255.0) + degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0]) + + # For the borders of the resulting image, fill in the values of the + # original image. + mask = tf.ones_like(degenerate) + padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]]) + padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]]) + result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image) + + # Blend the final result. + return blend(result, orig_image, factor) + + +def equalize(image: tf.Tensor) -> tf.Tensor: + """Implements Equalize function from PIL using TF ops.""" + + def scale_channel(im, c): + """Scale the data in the channel to implement equalize.""" + im = tf.cast(im[:, :, c], tf.int32) + # Compute the histogram of the image channel. + histo = tf.histogram_fixed_width(im, [0, 255], nbins=256) + + # For the purposes of computing the step, filter out the nonzeros. + nonzero = tf.where(tf.not_equal(histo, 0)) + nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1]) + step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255 + + def build_lut(histo, step): + # Compute the cumulative sum, shifting by step // 2 + # and then normalization by step. + lut = (tf.cumsum(histo) + (step // 2)) // step + # Shift lut, prepending with 0. + lut = tf.concat([[0], lut[:-1]], 0) + # Clip the counts to be in range. This is done + # in the C code for image.point. + return tf.clip_by_value(lut, 0, 255) + + # If step is zero, return the original image. Otherwise, build + # lut from the full histogram and step and then index from it. + result = tf.cond( + tf.equal(step, 0), lambda: im, + lambda: tf.gather(build_lut(histo, step), im)) + + return tf.cast(result, tf.uint8) + + # Assumes RGB for now. Scales each channel independently + # and then stacks the result. + s1 = scale_channel(image, 0) + s2 = scale_channel(image, 1) + s3 = scale_channel(image, 2) + image = tf.stack([s1, s2, s3], 2) + return image + + +def invert(image: tf.Tensor) -> tf.Tensor: + """Inverts the image pixels.""" + image = tf.convert_to_tensor(image) + return 255 - image + + +def wrap(image: tf.Tensor) -> tf.Tensor: + """Returns 'image' with an extra channel set to all 1s.""" + shape = tf.shape(image) + extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype) + extended = tf.concat([image, extended_channel], axis=2) + return extended + + +def unwrap(image: tf.Tensor, replace: int) -> tf.Tensor: + """Unwraps an image produced by wrap. + + Where there is a 0 in the last channel for every spatial position, + the rest of the three channels in that spatial dimension are grayed + (set to 128). Operations like translate and shear on a wrapped + Tensor will leave 0s in empty locations. 
Some transformations look + at the intensity of values to do preprocessing, and we want these + empty pixels to assume the 'average' value, rather than pure black. + + + Args: + image: A 3D Image Tensor with 4 channels. + replace: A one or three value 1D tensor to fill empty pixels. + + Returns: + image: A 3D image Tensor with 3 channels. + """ + image_shape = tf.shape(image) + # Flatten the spatial dimensions. + flattened_image = tf.reshape(image, [-1, image_shape[2]]) + + # Find all pixels where the last channel is zero. + alpha_channel = tf.expand_dims(flattened_image[:, 3], axis=-1) + + replace = tf.concat([replace, tf.ones([1], image.dtype)], 0) + + # Where they are zero, fill them in with 'replace'. + flattened_image = tf.where( + tf.equal(alpha_channel, 0), + tf.ones_like(flattened_image, dtype=image.dtype) * replace, + flattened_image) + + image = tf.reshape(flattened_image, image_shape) + image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3]) + return image + + +def _randomly_negate_tensor(tensor): + """With 50% prob turn the tensor negative.""" + should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool) + final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor) + return final_tensor + + +def _rotate_level_to_arg(level: float): + level = (level / _MAX_LEVEL) * 30. + level = _randomly_negate_tensor(level) + return (level,) + + +def _shrink_level_to_arg(level: float): + """Converts level to ratio by which we shrink the image content.""" + if level == 0: + return (1.0,) # if level is zero, do not shrink the image + # Maximum shrinking ratio is 2.9. + level = 2. / (_MAX_LEVEL / level) + 0.9 + return (level,) + + +def _enhance_level_to_arg(level: float): + return ((level / _MAX_LEVEL) * 1.8 + 0.1,) + + +def _shear_level_to_arg(level: float): + level = (level / _MAX_LEVEL) * 0.3 + # Flip level to negative with 50% chance. + level = _randomly_negate_tensor(level) + return (level,) + + +def _translate_level_to_arg(level: float, translate_const: float): + level = (level / _MAX_LEVEL) * float(translate_const) + # Flip level to negative with 50% chance. + level = _randomly_negate_tensor(level) + return (level,) + + +def _mult_to_arg(level: float, multiplier: float = 1.): + return (int((level / _MAX_LEVEL) * multiplier),) + + +def _apply_func_with_prob(func: Any, image: tf.Tensor, args: Any, prob: float): + """Apply `func` to image w/ `args` as input with probability `prob`.""" + assert isinstance(args, tuple) + + # Apply the function with probability `prob`. + should_apply_op = tf.cast( + tf.floor(tf.random.uniform([], dtype=tf.float32) + prob), tf.bool) + augmented_image = tf.cond(should_apply_op, lambda: func(image, *args), + lambda: image) + return augmented_image + + +def select_and_apply_random_policy(policies: Any, image: tf.Tensor): + """Select a random policy from `policies` and apply it to `image`.""" + policy_to_select = tf.random.uniform([], maxval=len(policies), dtype=tf.int32) + # Note that using tf.case instead of tf.conds would result in significantly + # larger graphs and would even break export for some larger policies. 
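+  # Exactly one branch applies its policy: each tf.cond below is an identity
+  # pass-through unless its index i equals policy_to_select.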
+ for (i, policy) in enumerate(policies): + image = tf.cond( + tf.equal(i, policy_to_select), + lambda selected_policy=policy: selected_policy(image), + lambda: image) + return image + + +NAME_TO_FUNC = { + 'AutoContrast': autocontrast, + 'Equalize': equalize, + 'Invert': invert, + 'Rotate': wrapped_rotate, + 'Posterize': posterize, + 'Solarize': solarize, + 'SolarizeAdd': solarize_add, + 'Color': color, + 'Contrast': contrast, + 'Brightness': brightness, + 'Sharpness': sharpness, + 'ShearX': shear_x, + 'ShearY': shear_y, + 'TranslateX': translate_x, + 'TranslateY': translate_y, + 'Cutout': cutout, +} + +# Functions that have a 'replace' parameter +REPLACE_FUNCS = frozenset({ + 'Rotate', + 'TranslateX', + 'ShearX', + 'ShearY', + 'TranslateY', + 'Cutout', +}) + + +def level_to_arg(cutout_const: float, translate_const: float): + """Creates a dict mapping image operation names to their arguments.""" + + no_arg = lambda level: () + posterize_arg = lambda level: _mult_to_arg(level, 4) + solarize_arg = lambda level: _mult_to_arg(level, 256) + solarize_add_arg = lambda level: _mult_to_arg(level, 110) + cutout_arg = lambda level: _mult_to_arg(level, cutout_const) + translate_arg = lambda level: _translate_level_to_arg(level, translate_const) + + args = { + 'AutoContrast': no_arg, + 'Equalize': no_arg, + 'Invert': no_arg, + 'Rotate': _rotate_level_to_arg, + 'Posterize': posterize_arg, + 'Solarize': solarize_arg, + 'SolarizeAdd': solarize_add_arg, + 'Color': _enhance_level_to_arg, + 'Contrast': _enhance_level_to_arg, + 'Brightness': _enhance_level_to_arg, + 'Sharpness': _enhance_level_to_arg, + 'ShearX': _shear_level_to_arg, + 'ShearY': _shear_level_to_arg, + 'Cutout': cutout_arg, + 'TranslateX': translate_arg, + 'TranslateY': translate_arg, + } + return args + + +def _parse_policy_info(name: Text, prob: float, level: float, + replace_value: List[int], cutout_const: float, + translate_const: float) -> Tuple[Any, float, Any]: + """Return the function that corresponds to `name` and update `level` param.""" + func = NAME_TO_FUNC[name] + args = level_to_arg(cutout_const, translate_const)[name](level) + + if name in REPLACE_FUNCS: + # Add in replace arg if it is required for the function that is called. + args = tuple(list(args) + [replace_value]) + + return func, prob, args + + +class ImageAugment(object): + """Image augmentation class for applying image distortions.""" + + def distort(self, image: tf.Tensor) -> tf.Tensor: + """Given an image tensor, returns a distorted image with the same shape. + + Args: + image: `Tensor` of shape [height, width, 3] representing an image. + + Returns: + The augmented version of `image`. + """ + raise NotImplementedError() + + +class AutoAugment(ImageAugment): + """Applies the AutoAugment policy to images. + + AutoAugment is from the paper: https://arxiv.org/abs/1805.09501. + """ + + def __init__(self, + augmentation_name: Text = 'v0', + policies: Optional[Dict[Text, Any]] = None, + cutout_const: float = 100, + translate_const: float = 250): + """Applies the AutoAugment policy to images. + + Args: + augmentation_name: The name of the AutoAugment policy to use. The + available options are `v0` and `test`. `v0` is the policy used for all + of the results in the paper and was found to achieve the best results on + the COCO dataset. 
`v1`, `v2` and `v3` are additional good policies found + on the COCO dataset that have slight variation in what operations were + used during the search procedure along with how many operations are + applied in parallel to a single image (2 vs 3). + policies: list of lists of tuples in the form `(func, prob, level)`, + `func` is a string name of the augmentation function, `prob` is the + probability of applying the `func` operation, `level` is the input + argument for `func`. + cutout_const: multiplier for applying cutout. + translate_const: multiplier for applying translation. + """ + super(AutoAugment, self).__init__() + + if policies is None: + self.available_policies = { + 'v0': self.policy_v0(), + 'test': self.policy_test(), + 'simple': self.policy_simple(), + } + + if augmentation_name not in self.available_policies: + raise ValueError( + 'Invalid augmentation_name: {}'.format(augmentation_name)) + + self.augmentation_name = augmentation_name + self.policies = self.available_policies[augmentation_name] + self.cutout_const = float(cutout_const) + self.translate_const = float(translate_const) + + def distort(self, image: tf.Tensor) -> tf.Tensor: + """Applies the AutoAugment policy to `image`. + + AutoAugment is from the paper: https://arxiv.org/abs/1805.09501. + + Args: + image: `Tensor` of shape [height, width, 3] representing an image. + + Returns: + A version of image that now has data augmentation applied to it based on + the `policies` pass into the function. + """ + input_image_type = image.dtype + + if input_image_type != tf.uint8: + image = tf.clip_by_value(image, 0.0, 255.0) + image = tf.cast(image, dtype=tf.uint8) + + replace_value = [128] * 3 + + # func is the string name of the augmentation function, prob is the + # probability of applying the operation and level is the parameter + # associated with the tf op. + + # tf_policies are functions that take in an image and return an augmented + # image. + tf_policies = [] + for policy in self.policies: + tf_policy = [] + # Link string name to the correct python function and make sure the + # correct argument is passed into that function. + for policy_info in policy: + policy_info = list(policy_info) + [ + replace_value, self.cutout_const, self.translate_const + ] + tf_policy.append(_parse_policy_info(*policy_info)) + # Now build the tf policy that will apply the augmentation procedue + # on image. + def make_final_policy(tf_policy_): + + def final_policy(image_): + for func, prob, args in tf_policy_: + image_ = _apply_func_with_prob(func, image_, args, prob) + return image_ + + return final_policy + + tf_policies.append(make_final_policy(tf_policy)) + + image = select_and_apply_random_policy(tf_policies, image) + image = tf.cast(image, dtype=input_image_type) + return image + + @staticmethod + def policy_v0(): + """Autoaugment policy that was used in AutoAugment Paper. + + Each tuple is an augmentation operation of the form + (operation, probability, magnitude). Each element in policy is a + sub-policy that will be applied sequentially on the image. + + Returns: + the policy. 
+ """ + + # TODO(dankondratyuk): tensorflow_addons defines custom ops, which + # for some reason are not included when building/linking + # This results in the error, "Op type not registered + # 'Addons>ImageProjectiveTransformV2' in binary" when running on borg TPUs + policy = [ + [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)], + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Color', 0.4, 1), ('Rotate', 0.6, 8)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Invert', 0.4, 9), ('Rotate', 0.6, 0)], + [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)], + [('Rotate', 1.0, 7), ('TranslateY', 0.8, 9)], + [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)], + [('ShearY', 0.8, 0), ('Color', 0.6, 4)], + [('Color', 1.0, 0), ('Rotate', 0.6, 2)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + [('Color', 0.8, 6), ('Rotate', 0.4, 5)], + ] + return policy + + @staticmethod + def policy_simple(): + """Same as `policy_v0`, except with custom ops removed.""" + + policy = [ + [('Color', 0.4, 9), ('Equalize', 0.6, 3)], + [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)], + [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)], + [('Color', 0.2, 0), ('Equalize', 0.8, 8)], + [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)], + [('Color', 0.6, 1), ('Equalize', 1.0, 2)], + [('Color', 0.4, 7), ('Equalize', 0.6, 0)], + [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)], + [('Solarize', 0.6, 8), ('Color', 0.6, 9)], + [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)], + [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)], + [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)], + [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)], + ] + return policy + + @staticmethod + def policy_test(): + """Autoaugment test policy for debugging.""" + policy = [ + [('TranslateX', 1.0, 4), ('Equalize', 1.0, 10)], + ] + return policy + + +class RandAugment(ImageAugment): + """Applies the RandAugment policy to images. + + RandAugment is from the paper https://arxiv.org/abs/1909.13719, + """ + + def __init__(self, + num_layers: int = 2, + magnitude: float = 10., + cutout_const: float = 40., + translate_const: float = 100.): + """Applies the RandAugment policy to images. + + Args: + num_layers: Integer, the number of augmentation transformations to apply + sequentially to an image. Represented as (N) in the paper. Usually best + values will be in the range [1, 3]. + magnitude: Integer, shared magnitude across all augmentation operations. + Represented as (M) in the paper. Usually best values are in the range + [5, 10]. + cutout_const: multiplier for applying cutout. + translate_const: multiplier for applying translation. 
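+
+    Example of usage (a minimal sketch; `image` is a uint8 tensor of shape
+    [height, width, 3]):
+
+    ```python
+    augmenter = RandAugment(num_layers=2, magnitude=10.)
+    image = augmenter.distort(image)
+    ```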
+    """
+    super(RandAugment, self).__init__()
+
+    self.num_layers = num_layers
+    self.magnitude = float(magnitude)
+    self.cutout_const = float(cutout_const)
+    self.translate_const = float(translate_const)
+    self.available_ops = [
+        'AutoContrast', 'Equalize', 'Invert', 'Rotate', 'Posterize', 'Solarize',
+        'Color', 'Contrast', 'Brightness', 'Sharpness', 'ShearX', 'ShearY',
+        'TranslateX', 'TranslateY', 'Cutout', 'SolarizeAdd'
+    ]
+
+  def distort(self, image: tf.Tensor) -> tf.Tensor:
+    """Applies the RandAugment policy to `image`.
+
+    Args:
+      image: `Tensor` of shape [height, width, 3] representing an image.
+
+    Returns:
+      The augmented version of `image`.
+    """
+    input_image_type = image.dtype
+
+    if input_image_type != tf.uint8:
+      image = tf.clip_by_value(image, 0.0, 255.0)
+      image = tf.cast(image, dtype=tf.uint8)
+
+    replace_value = [128] * 3
+    min_prob, max_prob = 0.2, 0.8
+
+    for _ in range(self.num_layers):
+      # maxval is len(available_ops) + 1 on purpose: the extra index falls
+      # through to the identity default branch, i.e. a chance of a no-op.
+      op_to_select = tf.random.uniform([],
+                                       maxval=len(self.available_ops) + 1,
+                                       dtype=tf.int32)
+
+      branch_fns = []
+      for (i, op_name) in enumerate(self.available_ops):
+        prob = tf.random.uniform([],
+                                 minval=min_prob,
+                                 maxval=max_prob,
+                                 dtype=tf.float32)
+        func, _, args = _parse_policy_info(op_name, prob, self.magnitude,
+                                           replace_value, self.cutout_const,
+                                           self.translate_const)
+        branch_fns.append((
+            i,
+            # pylint:disable=g-long-lambda
+            lambda selected_func=func, selected_args=args: selected_func(
+                image, *selected_args)))
+        # pylint:enable=g-long-lambda
+
+      image = tf.switch_case(
+          branch_index=op_to_select,
+          branch_fns=branch_fns,
+          default=lambda: tf.identity(image))
+
+    image = tf.cast(image, dtype=input_image_type)
+    return image
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..65d635056
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,4 @@
+numpy
+tensorflow
+matplotlib
+tensorflow_addons
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..095c3f510
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level (no modification needed)
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Basic parameters (review and adjust per model)
+# Batch size
+batch_size=128
+# Network name, same as the directory name
+Network="consistency_training_ID2499_for_TensorFlow2.X"
+# Number of devices; 1 by default for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=50
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+############ NPU debug parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; do not modify below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+############ NPU debug parameters (end) ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --eval_static=True \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results (no modification needed)
+echo "------------------ Final result ------------------"
+# Performance in images/sec (review per model)
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+# Print (no modification needed)
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy (review per model)
+train_accuracy=`grep "Test accuracy from student model:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $6}'`
+# Print (no modification needed)
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-watch result summary
+# Training case information (no modification needed)
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Performance data (no modification needed)
+# Throughput
+ActualFPS=${FPS}
+# Time per training step
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt (review per model)
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > 
$cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh new file mode 100644 index 000000000..e7bd5c542 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh @@ -0,0 +1,128 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 +Network="consistency_training_ID2499_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +#训练epoch,可选 +train_epochs=5 +#训练step +# train_steps=5 +#学习率 +# learning_rate=0.0001 +ckpt_path="" +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p_dynamic_eval.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +#参数修改 +#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --batch_size=${batch_size}\ + --eval_static=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#参数回改 +#sed -i "s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=${e2e_time} +wait +FPS=`awk 'BEGIN{printf 
"%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep Accuracy|awk '{print $3}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +ModelStatus="图执行FAIL" +DTS_Number="DTS2021090622224" +error_msg="type Conv2DBackpropFilter is not found in this op store" +Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log + diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh new file mode 100644 index 000000000..3b7f16576 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh @@ -0,0 +1,162 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 +Network="consistency_training_ID2499_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +#train_steps=50000 +#学习率 +# learning_rate=0.001 +# weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if 
[[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p_static_eval.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 $cur_path/train.py --data_dir=${data_path} \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --eval_static=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep "Test accuracy from student model:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $6}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + 
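+# TrainingTime above is the average time per training step in ms:
+# BatchSize * RANK_SIZE * 1000 / FPS, with FPS in images/sec.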
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..307b4cffb --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/consistency_training_ID2499_for_TensorFlow2.X/train.py @@ -0,0 +1,598 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Title: Consistency training with supervision +Author: [Sayak Paul](https://twitter.com/RisingSayak) +Date created: 2021/04/13 +Last modified: 2021/04/19 +Description: Training with consistency regularization for robustness against data distribution shifts. +""" +""" +Deep learning models excel in many image recognition tasks when the data is independent +and identically distributed (i.i.d.). 
However, they can suffer from performance
+degradation caused by subtle distribution shifts in the input data (such as random
+noise, contrast change, and blurring). So, naturally, there arises the question of
+why. As discussed in [A Fourier Perspective on Model Robustness in Computer Vision](https://arxiv.org/pdf/1906.08988.pdf),
+there's no reason for deep learning models to be robust against such shifts. Standard
+model training procedures (such as standard image classification training workflows)
+*don't* enable a model to learn beyond what's fed to it in the form of training data.
+
+In this example, we will be training an image classification model enforcing a sense of
+*consistency* inside it by doing the following:
+
+* Train a standard image classification model.
+* Train an _equal or larger_ model on a noisy version of the dataset (augmented using
+[RandAugment](https://arxiv.org/abs/1909.13719)).
+* To do this, we will first obtain predictions of the previous model on the clean images
+of the dataset.
+* We will then use these predictions and train the second model to match these
+predictions on the noisy variant of the same images. This is identical to the workflow of
+[*Knowledge Distillation*](https://keras.io/examples/vision/knowledge_distillation/), but
+since the student model is equal or larger in size this process is also referred to as
+***Self-Training***.
+
+This overall training workflow finds its roots in works like
+[FixMatch](https://arxiv.org/abs/2001.07685), [Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848),
+and [Noisy Student Training](https://arxiv.org/abs/1911.04252). Since this training
+process encourages a model to yield consistent predictions for clean as well as noisy
+images, it's often referred to as *consistency training* or *training with consistency
+regularization*. Although the example focuses on using consistency training to enhance
+the robustness of models to common corruptions, this example can also serve as a template
+for performing _weakly supervised learning_.
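+
+In pseudocode, the student update implemented later in this file boils down to
+the following (an illustrative sketch with `T` the softmax temperature, not the
+exact code):
+
+    teacher_preds = teacher(clean_images, training=False)
+    student_preds = student(noisy_images, training=True)
+    loss = (student_loss_fn(labels, student_preds) +
+            kl_divergence(softmax(teacher_preds / T),
+                          softmax(student_preds / T))) / 2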
+ +This example requires TensorFlow 2.4 or higher, as well as TensorFlow Hub and TensorFlow +Models, which can be installed using the following command: + +""" + +"""shell +pip install -q tf-models-official tensorflow-addons +""" + +""" +## Imports and setup +""" +import imp +import npu_device +import time +import os +import ast +import numpy as np + +from augment import RandAugment +from tensorflow.keras import layers +from tensorflow.python.keras import backend as K +from tensorflow.python.keras.datasets.cifar import load_batch +import tensorflow as tf +import tensorflow_addons as tfa +# import matplotlib.pyplot as plt +import argparse + +tf.random.set_seed(42) + +""" +## Define hyperparameters +""" +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_dir', default="../cifar-10-batches-py/", + help="""directory to data""") + parser.add_argument('--batch_size', default=128, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=5, type=int, + help="""epochs""") + parser.add_argument("--log_steps", default=50, type=int, + help="TimeHis log Step.") + parser.add_argument('--eval_static', dest="eval_static", type=ast.literal_eval, + help='the path to train data') + #===============================NPU Migration========================================= + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + 
npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + npu_device.open().as_default() +#===============================NPU Migration========================================= + +print('npu_device loaded') +npu_config() + +data_path = args.data_dir + +AUTO = tf.data.AUTOTUNE +BATCH_SIZE = args.batch_size +EPOCHS = args.epochs + +CROP_TO = 72 +RESIZE_TO = 96 + +""" +## Load the CIFAR-10 dataset +""" + +def load_data(data_path): + num_train_samples = 50000 + x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') + y_train = np.empty((num_train_samples,), dtype='uint8') + + for i in range(1, 6): + fpath = os.path.join(data_path, 'data_batch_' + str(i)) + (x_train[(i - 1) * 10000:i * 10000, :, :, :], + y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath) + + fpath = os.path.join(data_path, 'test_batch') + x_test, y_test = load_batch(fpath) + + y_train = np.reshape(y_train, (len(y_train), 1)) + y_test = np.reshape(y_test, (len(y_test), 1)) + + if K.image_data_format() == 'channels_last': + x_train = x_train.transpose(0, 2, 3, 1) + x_test = x_test.transpose(0, 2, 3, 1) + + x_test = x_test.astype(x_train.dtype) + y_test = y_test.astype(y_train.dtype) + + return (x_train, y_train), (x_test, y_test) + +(x_train, y_train), (x_test, y_test) = load_data(data_path) + +val_samples = 49500 +new_train_x, new_y_train = x_train[: val_samples + 1], y_train[: val_samples + 1] +val_x, val_y = x_train[val_samples:], y_train[val_samples:] + +""" +## Create TensorFlow `Dataset` objects +""" + +# Initialize `RandAugment` object with 2 layers of +# augmentation transforms and strength of 9. +augmenter = RandAugment(num_layers=2, magnitude=9) + +""" +For training the teacher model, we will only be using two geometric augmentation +transforms: random horizontal flip and random crop. +""" + + +def preprocess_train(image, label, noisy=True): + image = tf.image.random_flip_left_right(image) + # We first resize the original image to a larger dimension + # and then we take random crops from it. + image = tf.image.resize(image, [RESIZE_TO, RESIZE_TO]) + image = tf.image.random_crop(image, [CROP_TO, CROP_TO, 3]) + if noisy: + image = augmenter.distort(image) + return image, label + + +def preprocess_test(image, label): + image = tf.image.resize(image, [CROP_TO, CROP_TO]) + return image, label + + +train_ds = tf.data.Dataset.from_tensor_slices((new_train_x, new_y_train)) +validation_ds = tf.data.Dataset.from_tensor_slices((val_x, val_y)) +test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)) + +""" +We make sure `train_clean_ds` and `train_noisy_ds` are shuffled using the *same* seed to +ensure their orders are exactly the same. This will be helpful during training the +student model. 
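+
+Schematically (illustrative):
+
+    clean = train_ds.shuffle(BATCH_SIZE * 10, seed=42)  # same seed
+    noisy = train_ds.shuffle(BATCH_SIZE * 10, seed=42)  # -> same order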
+""" +if args.eval_static: + + # This dataset will be used to train the first model. + train_clean_ds = ( + train_ds.shuffle(BATCH_SIZE * 10, seed=42) + .map(lambda x, y: (preprocess_train(x, y, noisy=False)), num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) + + # This prepares the `Dataset` object to use RandAugment. + train_noisy_ds = ( + train_ds.shuffle(BATCH_SIZE * 10, seed=42) + .map(preprocess_train, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) + + validation_ds = ( + validation_ds.map(preprocess_test, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) + + test_ds = ( + test_ds.map(preprocess_test, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=True) + .prefetch(AUTO) + ) +else: + # This dataset will be used to train the first model. + train_clean_ds = ( + train_ds.shuffle(BATCH_SIZE * 10, seed=42) + .map(lambda x, y: (preprocess_train(x, y, noisy=False)), num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + + # This prepares the `Dataset` object to use RandAugment. + train_noisy_ds = ( + train_ds.shuffle(BATCH_SIZE * 10, seed=42) + .map(preprocess_train, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + + validation_ds = ( + validation_ds.map(preprocess_test, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) + + test_ds = ( + test_ds.map(preprocess_test, num_parallel_calls=AUTO) + .batch(BATCH_SIZE) + .prefetch(AUTO) + ) +# This dataset will be used to train the second model. +consistency_training_ds = tf.data.Dataset.zip((train_clean_ds, train_noisy_ds)) + +""" +## Visualize the datasets +""" + +# sample_images, sample_labels = next(iter(train_clean_ds)) +# plt.figure(figsize=(10, 10)) +# for i, image in enumerate(sample_images[:9]): +# ax = plt.subplot(3, 3, i + 1) +# plt.imshow(image.numpy().astype("int")) +# plt.axis("off") + +# sample_images, sample_labels = next(iter(train_noisy_ds)) +# plt.figure(figsize=(10, 10)) +# for i, image in enumerate(sample_images[:9]): +# ax = plt.subplot(3, 3, i + 1) +# plt.imshow(image.numpy().astype("int")) +# plt.axis("off") + +""" +## Define a model building utility function + +We now define our model building utility. Our model is based on the [ResNet50V2 architecture](https://arxiv.org/abs/1603.05027). +""" + + +def get_training_model(num_classes=10): + resnet50_v2 = tf.keras.applications.ResNet50V2( + weights=None, include_top=False, input_shape=(CROP_TO, CROP_TO, 3), + ) + model = tf.keras.Sequential( + [ + layers.Input((CROP_TO, CROP_TO, 3)), + layers.Rescaling(scale=1.0 / 127.5, offset=-1), + resnet50_v2, + layers.GlobalAveragePooling2D(), + layers.Dense(num_classes), + ] + ) + return model + + +""" +In the interest of reproducibility, we serialize the initial random weights of the +teacher network. +""" + +initial_teacher_model = get_training_model() +initial_teacher_model.save_weights("initial_teacher_model.h5") + +""" +## Train the teacher model + +As noted in Noisy Student Training, if the teacher model is trained with *geometric +ensembling* and when the student model is forced to mimic that, it leads to better +performance. The original work uses [Stochastic Depth](https://arxiv.org/abs/1603.09382) +and [Dropout](https://jmlr.org/papers/v15/srivastava14a.html) to bring in the ensembling +part but for this example, we will use [Stochastic Weight Averaging](https://arxiv.org/abs/1803.05407) +(SWA) which also resembles geometric ensembling. 
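+
+Schematically (an illustrative sketch; `My_SWA` is this port's local stand-in
+for `tfa.optimizers.SWA`):
+
+    teacher_model.compile(optimizer=My_SWA(tf.keras.optimizers.Adam()), ...)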
+""" + +# Define the callbacks. +reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(patience=3) +early_stopping = tf.keras.callbacks.EarlyStopping( + patience=10, restore_best_weights=True +) + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +# Initialize SWA from tf-hub. +#SWA = tfa.optimizers.SWA +from My_SWA import My_SWA + +# Compile and train the teacher model. +teacher_model = get_training_model() +teacher_model.load_weights("initial_teacher_model.h5") +teacher_model.compile( + # Notice that we are wrapping our optimizer within SWA + optimizer=My_SWA(tf.keras.optimizers.Adam()), + loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), + metrics=["accuracy"], +) +history = teacher_model.fit( + train_clean_ds, + epochs=EPOCHS, + validation_data=validation_ds, + callbacks=[reduce_lr, TimeHistory(args.batch_size,args.log_steps)], + verbose=2, +) + +#Evaluate the teacher model on the test set. +_, acc = teacher_model.evaluate(test_ds, verbose=0) +print(f"Test accuracy: {acc}") + +""" +## Define a self-training utility + +For this part, we will borrow the `Distiller` class from [this Keras Example](https://keras.io/examples/vision/knowledge_distillation/). +""" + +# Majority of the code is taken from: +# https://keras.io/examples/vision/knowledge_distillation/ +class SelfTrainer(tf.keras.Model): + def __init__(self, student, teacher): + super(SelfTrainer, self).__init__() + self.student = student + self.teacher = teacher + + def compile( + self, optimizer, metrics, student_loss_fn, distillation_loss_fn, temperature=3, + ): + super(SelfTrainer, self).compile(optimizer=optimizer, metrics=metrics) + self.student_loss_fn = student_loss_fn + self.distillation_loss_fn = distillation_loss_fn + self.temperature = temperature + + def train_step(self, data): + # Since our dataset is a zip of two independent datasets, + # after initially parsing them, we segregate the + # respective images and labels next. 
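+        # i.e. data == ((clean_images, clean_labels), (noisy_images, noisy_labels));
+        # only the labels from the noisy branch (`y`) feed the supervised loss.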
+ clean_ds, noisy_ds = data + clean_images, _ = clean_ds + noisy_images, y = noisy_ds + + # Forward pass of teacher + teacher_predictions = self.teacher(clean_images, training=False) + + with tf.GradientTape() as tape: + # Forward pass of student + student_predictions = self.student(noisy_images, training=True) + + # Compute losses + student_loss = self.student_loss_fn(y, student_predictions) + distillation_loss = self.distillation_loss_fn( + tf.nn.softmax(teacher_predictions / self.temperature, axis=1), + tf.nn.softmax(student_predictions / self.temperature, axis=1), + ) + total_loss = (student_loss + distillation_loss) / 2 + + # Compute gradients + trainable_vars = self.student.trainable_variables + gradients = tape.gradient(total_loss, trainable_vars) + + # Update weights + self.optimizer.apply_gradients(zip(gradients, trainable_vars)) + + # Update the metrics configured in `compile()` + self.compiled_metrics.update_state( + y, tf.nn.softmax(student_predictions, axis=1) + ) + + # Return a dict of performance + results = {m.name: m.result() for m in self.metrics} + results.update({"total_loss": total_loss}) + return results + + def test_step(self, data): + # During inference, we only pass a dataset consisting images and labels. + x, y = data + + # Compute predictions + y_prediction = self.student(x, training=False) + + # Update the metrics + self.compiled_metrics.update_state(y, tf.nn.softmax(y_prediction, axis=1)) + + # Return a dict of performance + results = {m.name: m.result() for m in self.metrics} + return results + + +""" +The only difference in this implementation is the way loss is being calculated. **Instead +of weighted the distillation loss and student loss differently we are taking their +average following Noisy Student Training**. +""" + +""" +## Train the student model +""" + +# Define the callbacks. +# We are using a larger decay factor to stabilize the training. +reduce_lr = tf.keras.callbacks.ReduceLROnPlateau( + patience=3, factor=0.5, monitor="val_accuracy" +) +early_stopping = tf.keras.callbacks.EarlyStopping( + patience=10, restore_best_weights=True, monitor="val_accuracy" +) + +# Compile and train the student model. +self_trainer = SelfTrainer(student=get_training_model(), teacher=teacher_model) +self_trainer.compile( + # Notice we are *not* using SWA here. + optimizer="adam", + metrics=["accuracy"], + student_loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), + distillation_loss_fn=tf.keras.losses.KLDivergence(), + temperature=10, +) +history = self_trainer.fit( + consistency_training_ds, + epochs=EPOCHS, + validation_data=validation_ds, + callbacks=[reduce_lr, TimeHistory(args.batch_size,args.log_steps)], + verbose=2 +) + +# Evaluate the student model. +acc = self_trainer.evaluate(test_ds, verbose=0) +print(f"Test accuracy from student model: {acc}") + +""" +## Assess the robustness of the models + +A standard benchmark of assessing the robustness of vision models is to record their +performance on corrupted datasets like ImageNet-C and CIFAR-10-C both of which were +proposed in [Benchmarking Neural Network Robustness to Common Corruptions and +Perturbations](https://arxiv.org/abs/1903.12261). For this example, we will be using the +CIFAR-10-C dataset which has 19 different corruptions on 5 different severity levels. To +assess the robustness of the models on this dataset, we will do the following: + +* Run the pre-trained models on the highest level of severities and obtain the top-1 +accuracies. +* Compute the mean top-1 accuracy. 
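+
+A rough sketch of that protocol (illustrative; `load_cifar10_c` is a
+hypothetical loader, not part of this example):
+
+    accs = []
+    for corruption in CORRUPTIONS:  # the 19 CIFAR-10-C corruption types
+        ds = load_cifar10_c(corruption, severity=5)
+        _, acc = model.evaluate(ds, verbose=0)
+        accs.append(acc)
+    mean_top1 = sum(accs) / len(accs)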
+
+For the purpose of this example, we won't be going through these steps. This is why we
+trained the models for only 5 epochs. You can check out [this
+repository](https://github.com/sayakpaul/Consistency-Training-with-Supervision) that
+demonstrates the full-scale training experiments and also the aforementioned assessment.
+The figure below presents an executive summary of that assessment:
+
+![](https://i.ibb.co/HBJkM9R/image.png)
+
+**Mean Top-1** results are for the CIFAR-10-C dataset and **Test Top-1** results are for
+the CIFAR-10 test set. It's clear that consistency training not only enhances model
+robustness but also improves the standard test performance.
+"""
\ No newline at end of file
-- 
Gitee

From 5d5b3400cfc948bfaee5c371e6239b43e7034d53 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:15:14 +0000
Subject: [PATCH 34/54] =?UTF-8?q?convmixer=5FID2501=5Ffor=5FTensorFlow2.X?=
 =?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../LICENSE                                    | 284 ++++++++++
 .../README.md                                  | 183 +++++++
 .../modelzoo_level.txt                         |   0
 .../requirements.txt                           |   6 +
 .../test/train_full_1p.sh                      | 171 ++++++
 .../test/train_performance_1p_dynamic_eval.sh  | 183 +++++++
 .../test/train_performance_1p_force.sh         | 187 +++++++
 .../test/train_performance_1p_static_eval.sh   | 171 ++++++
 .../train.py                                   | 518 ++++++++++++++++++
 9 files changed, 1703 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/LICENSE
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/README.md
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/modelzoo_level.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/requirements.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_full_1p.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_force.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/train.py

diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..ab652360b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,284 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..bbeecbb18
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/README.md
@@ -0,0 +1,183 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.02.17**
+
+**Size: 6.9M**
+
+**Framework: TensorFlow_2.4.1**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: a vision model trained with image patches as input**
+
+<h2 id="概述.md">Overview</h2>
+
+    ConvMixer is an extremely simple model, similar in spirit to ViT and the even more basic MLP-Mixer: these models operate directly on patches as input, separate the mixing of the spatial and channel dimensions, and keep the same size and resolution throughout the network. ConvMixer uses only standard convolutions to implement the mixing steps. Despite its simplicity, experiments show that, beyond outperforming classic vision models such as ResNet, ConvMixer also outperforms ViT, MLP-Mixer, and some of their variants at comparable parameter counts and dataset sizes.
+
+- Reference paper ("Patches Are All You Need?"):
+
+    [https://openreview.net/pdf?id=TVHS5Y4dNvM](https://openreview.net/pdf?id=TVHS5Y4dNvM)
+
+- Reference implementation:
+
+    [https://github.com/keras-team/keras-io/blob/master/examples/vision/convmixer.py](https://github.com/keras-team/keras-io/blob/master/examples/vision/convmixer.py)
+
+- Implementation adapted for the Ascend AI Processor:
+
+    [https://gitee.com/jelly_111/research_tf2/tree/master/convmixer_ID2501_for_TensorFlow2.X](https://gitee.com/jelly_111/research_tf2/tree/master/convmixer_ID2501_for_TensorFlow2.X)
+
+- To fetch the code at a specific commit_id via Git:
+    ```
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's repository directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # enter the model's code path; skip if the repo contains only this model
+    ```
+
+## Default configuration
+
+- Main training hyperparameters (single card):
+    - batch_size: 128
+    - epochs: 10
+    - lr: 0.001
+    - weight_decay: 0.0001
+
+## Supported features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+## Mixed precision training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators across the network to float16, improving throughput and reducing memory usage with very little loss of accuracy.
+
+## Enabling mixed precision
+
+
+```
+  npu_device.global_options().precision_mode='allow_mix_precision'
+  npu_device.open().as_default()
+```
+
+
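+The same switch is wrapped in the `npu_config()` helper of this repo's `train.py`. A slightly fuller sketch is shown below; the function name `enable_mixed_precision` is illustrative only, and the optional mixlist handling mirrors the `--use_mixlist`/`--mixlist_file` flags of the test scripts:
+
+```
+import npu_device
+
+def enable_mixed_precision(mixlist_file=None):
+    # Ask CANN to automatically lower eligible float32 ops to float16.
+    npu_device.global_options().precision_mode = 'allow_mix_precision'
+    if mixlist_file:
+        # Optionally adjust the built-in mixed-precision op lists with a
+        # JSON file (the scripts default to ./configs/ops_info.json).
+        npu_device.global_options().modify_mixlist = mixlist_file
+    # Make the NPU the default TensorFlow device.
+    npu_device.open().as_default()
+```
+
+The scripts under `test/` forward `--precision_mode` and the optional mixlist file to exactly these options.
+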
+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Download the cifar10 training dataset yourself; it should have the following layout:
+    ```
+    cifar10/
+    ├── batches.meta
+    ├── data_batch_1
+    ├── data_batch_2
+    └── ...
+    ```
+
+## Model training
+
+- Click "Download Now" and choose a suitable method to download the source package.
+- Start training
+
+    1. Before launching training, configure the environment variables required by the program.
+
+       For the environment variable configuration, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-card training
+
+       2.1 Configure `data_path` in the train_full_1p.sh script (script path: convmixer_ID2501_for_TensorFlow2.X/test/train_full_1p.sh) according to your actual path; the dataset argument looks like:
+
+            --data_path=/home/cifar10
+
+       2.2 The 1p command is:
+
+            bash train_full_1p.sh --data_path=/home/cifar10
+
+
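+For reference, here is a condensed sketch of how `train.py` (included later in this patch) consumes this directory layout; the wrapper name `load_cifar10` is illustrative only:
+
+```
+import os
+import numpy as np
+from tensorflow.python.keras.datasets.cifar import load_batch
+
+def load_cifar10(data_path):
+    # Five training batches of 10,000 images each, stored channels-first.
+    x_train = np.empty((50000, 3, 32, 32), dtype="uint8")
+    y_train = np.empty((50000,), dtype="uint8")
+    for i in range(1, 6):
+        fpath = os.path.join(data_path, "data_batch_" + str(i))
+        (x_train[(i - 1) * 10000:i * 10000],
+         y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
+    # Held-out test batch.
+    x_test, y_test = load_batch(os.path.join(data_path, "test_batch"))
+    return (x_train, y_train), (x_test, np.asarray(y_test))
+```
+
+Only `data_batch_1`–`data_batch_5` and `test_batch` are read; `batches.meta` is not needed by the script.
+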
+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+    1. Obtain the data.
+       See "Dataset preparation" under "Quick Start".
+
+- Model training.
+
+    Follow the training steps under "Model training".
+
+- Model evaluation.
+
+    Follow the validation steps under "Model training".
+
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+
+```
+convmixer_ID2501_for_TensorFlow2.X/
+├── LICENSE
+├── modelzoo_level.txt
+├── README.md
+├── requirements.txt
+├── test
+│   ├── train_full_1p.sh
+│   ├── train_performance_1p_static_eval.sh
+│   ├── train_performance_1p_dynamic_eval.sh
+│   ├── train_performance_1p_force.sh
+├── train.py
+
+```
+
+## Script parameters
+
+```
+--data_dir        path to the training dataset
+--epochs          number of training epochs
+--batch_size      training batch size
+```
+
+## Training process
+
+1. Launch single-card training with the training command under "Model training".
+2. Set data_path in the training script (train_full_1p.sh) to the path of the training dataset; see the example under "Model training" for the detailed procedure.
+3. The model is stored under "curpath/output/ASCEND_DEVICE_ID", including the training log files.
+4. Taking multi-card training as an example, the loss information is written to curpath/output/{ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
+
+## Inference/validation process
+
+```
+ NA
+
+```
diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..82f1e9fcd
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,6 @@
+os
+ast
+numpy
+matplotlib
+tensorflow
+tensorflow_addons
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..fc3018bb8
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#设置默认日志级别,不需要修改
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#基础参数,需要模型审视修改
+#Batch Size
+batch_size=128
+#网络名称,同目录名称
+Network="convmixer_ID2501_for_TensorFlow2.X"
+#Device数量,单卡默认为1
+RANK_SIZE=1
+#训练epoch,可选
+train_epochs=10
+#训练step
+#train_steps=50000
+#学习率
+learning_rate=0.001
+weight_decay=0.0001
+#参数配置
+data_path=""
+
+############维测参数##############
+precision_mode="allow_mix_precision"
+#维持参数,以下不需要修改
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############维测参数##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        
mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 $cur_path/train.py --data_dir=${data_path} \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --lr=${learning_rate} \ + --weight_decay=${weight_decay} \ + --force=True \ + --eval_static=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$15}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh new file mode 100644 index 000000000..de4f3653d --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh @@ -0,0 +1,183 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 +Network="convmixer_ID2501_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +#train_steps=50000 +#学习率 +learning_rate=0.001 +weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p_static_eval.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 $cur_path/train.py --data_dir=${data_path} \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --lr=${learning_rate} \ + --weight_decay=${weight_decay} \ + --force=True \ + --eval_static=False \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + 
--use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=${e2e_time} +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep Accuracy|awk '{print $3}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +ModelStatus="图执行FAIL" +DTS_Number="DTS2021090622224" +error_msg="op type Conv2DBackpropFilter is not found in this op store." +Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log + diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_force.sh b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_force.sh new file mode 100644 index 000000000..67911af30 --- /dev/null +++ 
b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_force.sh @@ -0,0 +1,187 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 +Network="convmixer_ID2501_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +#train_steps=50000 +#学习率 +learning_rate=0.001 +weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p_static_eval.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 $cur_path/train.py --data_dir=${data_path} \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --lr=${learning_rate} \ + --weight_decay=${weight_decay} \ + --force=False \ + --eval_static=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#参数回改 +#sed -i 
"s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=${e2e_time} +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep Accuracy|awk '{print $3}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +ModelStatus="图执行FAIL" +DTS_Number="DTS2022011810830" +error_msg="device:CPU:0 container: localhost name: _AnonymousVar0" +Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log + diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh new file mode 100644 index 000000000..f6db19b95 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh @@ -0,0 +1,171 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=128 +#网络名称,同目录名称 
+Network="convmixer_ID2501_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +#train_steps=50000 +#学习率 +learning_rate=0.001 +weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p_static_eval.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 $cur_path/train.py --data_dir=${data_path} \ + --epochs=${train_epochs} \ + --batch_size=${batch_size} \ + --lr=${learning_rate} \ + --weight_decay=${weight_decay} \ + --force=True \ + --eval_static=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + 
+#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$15}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..d9465f93e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/convmixer_ID2501_for_TensorFlow2.X/train.py @@ -0,0 +1,518 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+"""
+Title: Image classification with ConvMixer
+Author: [Sayak Paul](https://twitter.com/RisingSayak)
+Date created: 2021/10/12
+Last modified: 2021/10/12
+Description: An all-convolutional network applied to patches of images.
+"""
+"""
+## Introduction
+
+Vision Transformers (ViT; [Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) extract
+small patches from the input images, linearly project them, and then apply the
+Transformer ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) blocks. The application
+of ViTs to image recognition tasks is quickly becoming a promising area of research,
+because ViTs eliminate the need to have strong inductive biases (such as convolutions) for
+modeling locality. This presents them as a general computation primitive capable of
+learning just from the training data with minimal inductive priors. ViTs
+yield great downstream performance when trained with proper regularization, data
+augmentation, and relatively large datasets.
+
+In the [Patches Are All You Need](https://openreview.net/pdf?id=TVHS5Y4dNvM) paper (note: at
+the time of writing, it is a submission to the ICLR 2022 conference), the authors extend
+the idea of using patches to train an all-convolutional network and demonstrate
+competitive results. Their architecture, namely **ConvMixer**, uses recipes from recent
+isotropic architectures like ViT and MLP-Mixer
+([Tolstikhin et al.](https://arxiv.org/abs/2105.01601)), such as using the same
+depth and resolution across different layers in the network, residual connections,
+and so on.
+
+In this example, we will implement the ConvMixer model and demonstrate its performance on
+the CIFAR-10 dataset.
+
+To use the AdamW optimizer, we need to install TensorFlow Addons:
+
+```shell
+pip install -U -q tensorflow-addons
+```
+"""
+
+"""
+## Imports
+"""
+import npu_device
+import time
+import os
+import ast
+from tensorflow.keras import layers
+from tensorflow import keras
+from tensorflow.python.keras import backend as K
+from tensorflow.python.keras.datasets.cifar import load_batch
+from tensorflow.python.eager import context
+# import matplotlib.pyplot as plt
+import tensorflow_addons as tfa
+import tensorflow as tf
+import numpy as np
+from tensorflow.python.framework import ops
+import argparse
+"""
+## Hyperparameters
+
+To keep run time short, we will train the model for only 10 epochs. To focus on
+the core ideas of ConvMixer, we will not use other training-specific elements like
+RandAugment ([Cubuk et al.](https://arxiv.org/abs/1909.13719)). If you are interested in
+learning more about those details, please refer to the
+[original paper](https://openreview.net/pdf?id=TVHS5Y4dNvM).
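+
+In this NPU port, these hyperparameters are exposed as command-line flags (see
+`parse_args` below): `--lr` (default 0.001), `--weight_decay` (0.0001),
+`--batch_size` (128), and `--epochs` (10).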
+""" +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_dir', default="../cifar-10-batches-py/", + help="""directory to data""") + parser.add_argument('--lr', default=0.001, type=float, + help="""learning rate""") + parser.add_argument('--weight_decay', default=0.0001, type=float, + help="""weight decay""") + parser.add_argument('--batch_size', default=128, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=10, type=int, + help="""epochs""") + parser.add_argument('--eval_static', dest="eval_static", type=ast.literal_eval, + help='eval static or not') + parser.add_argument('--force', dest="force", type=ast.literal_eval, + help='force preprocessing on CPU') + #===============================NPU Migration========================================= + parser.add_argument("--log_steps", default=50, type=int, + help="TimeHis log Step.") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, help='auto_tune flag, default is False') + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +args = parse_args() +data_path = args.data_dir +learning_rate = args.lr +weight_decay = args.weight_decay +batch_size = args.batch_size +num_epochs = args.epochs + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + 
npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= + +print('npu_device loaded') +npu_config() + +""" +## Load the CIFAR-10 dataset +""" +def load_data(data_path): + num_train_samples = 50000 + x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') + y_train = np.empty((num_train_samples,), dtype='uint8') + + for i in range(1, 6): + fpath = os.path.join(data_path, 'data_batch_' + str(i)) + (x_train[(i - 1) * 10000:i * 10000, :, :, :], + y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath) + + fpath = os.path.join(data_path, 'test_batch') + x_test, y_test = load_batch(fpath) + + y_train = np.reshape(y_train, (len(y_train), 1)) + y_test = np.reshape(y_test, (len(y_test), 1)) + + if K.image_data_format() == 'channels_last': + x_train = x_train.transpose(0, 2, 3, 1) + x_test = x_test.transpose(0, 2, 3, 1) + + x_test = x_test.astype(x_train.dtype) + y_test = y_test.astype(y_train.dtype) + + return (x_train, y_train), (x_test, y_test) + +(x_train, y_train), (x_test, y_test) = load_data(data_path) + +val_split = 0.1 + +val_indices = int(len(x_train) * val_split) +new_x_train, new_y_train = x_train[val_indices:], y_train[val_indices:] +x_val, y_val = x_train[:val_indices], y_train[:val_indices] + +print(f"Training data samples: {len(new_x_train)}") +print(f"Validation data samples: {len(x_val)}") +print(f"Test data samples: {len(x_test)}") + +""" +## Prepare `tf.data.Dataset` objects + +Our data augmentation pipeline is different from what the authors used for the CIFAR-10 +dataset, which is fine for the purpose of the example. 
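+
+One NPU-specific detail worth noting: when the script is launched with
+`--eval_static`, batches are drawn with `drop_remainder=True` so that every batch
+has a fully static shape, which helps graph compilation on the NPU (see
+`make_datasets` below).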
+""" + +image_size = 32 +auto = tf.data.AUTOTUNE +if args.force: + with context.device('CPU:0'): + data_augmentation = keras.Sequential( + [layers.RandomCrop(image_size, image_size), layers.RandomFlip("horizontal"),], + name="data_augmentation", + ) +else: + data_augmentation = keras.Sequential( + [layers.RandomCrop(image_size, image_size), layers.RandomFlip("horizontal"),], + name="data_augmentation", + ) + +def make_datasets(images, labels, is_train=False): + dataset = tf.data.Dataset.from_tensor_slices((images, labels)) + if is_train: + dataset = dataset.shuffle(batch_size * 10) + if args.eval_static: + dataset = dataset.batch(batch_size, drop_remainder=True) + else: + dataset = dataset.batch(batch_size) + if is_train: + dataset = dataset.map( + lambda x, y: (data_augmentation(x), y), num_parallel_calls=auto + ) + return dataset.prefetch(auto) + + +train_dataset = make_datasets(new_x_train, new_y_train, is_train=True) +val_dataset = make_datasets(x_val, y_val) +test_dataset = make_datasets(x_test, y_test) + +""" +## ConvMixer utilities + +The following figure (taken from the original paper) depicts the ConvMixer model: + +![](https://i.imgur.com/yF8actg.png) + +ConvMixer is very similar to the MLP-Mixer, model with the following key +differences: + +* Instead of using fully-connected layers, it uses standard convolution layers. +* Instead of LayerNorm (which is typical for ViTs and MLP-Mixers), it uses BatchNorm. + +Two types of convolution layers are used in ConvMixer. **(1)**: Depthwise convolutions, +for mixing spatial locations of the images, **(2)**: Pointwise convolutions (which follow +the depthwise convolutions), for mixing channel-wise information across the patches. +Another keypoint is the use of *larger kernel sizes* to allow a larger receptive field. +""" +@ops.RegisterGradient("FastGelu") +def _fast_gelu_grad(op,grad): + """ The gradient for fastgelu + + Args: + op:The fastgelu operations that we are differentiating,which we can us to find the inputs and outputs of the original op. + grad: Gradient with respect to the output of the fast_gelu op. + + Returns: + Gradient with respect to the input of fast_gelu + """ + return [npu_device.gen_npu_ops.fast_gelu_grad(grad,op.inputs[0])] + +def activation_block(x): + #x = layers.Activation("gelu")(x) + x=npu_device.gen_npu_ops.fast_gelu(x) + return layers.BatchNormalization()(x) + + +def conv_stem(x, filters: int, patch_size: int): + x = layers.Conv2D(filters, kernel_size=patch_size, strides=patch_size)(x) + return activation_block(x) + + +def conv_mixer_block(x, filters: int, kernel_size: int): + # Depthwise convolution. + x0 = x + x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x) + x = layers.Add()([activation_block(x), x0]) # Residual. + + # Pointwise convolution. + x = layers.Conv2D(filters, kernel_size=1)(x) + x = activation_block(x) + + return x + + +def get_conv_mixer_256_8( + image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10 +): + """ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM. + The hyperparameter values are taken from the paper. + """ + inputs = keras.Input((image_size, image_size, 3)) + x = layers.Rescaling(scale=1.0 / 255)(inputs) + + # Extract patch embeddings. + x = conv_stem(x, filters, patch_size) + + # ConvMixer blocks. + for _ in range(depth): + x = conv_mixer_block(x, filters, kernel_size) + + # Classification block. 
+ x = layers.GlobalAvgPool2D()(x) + outputs = layers.Dense(num_classes, activation="softmax")(x) + + return keras.Model(inputs, outputs) + + +""" +The model used in this experiment is termed as **ConvMixer-256/8** where 256 denotes the +number of channels and 8 denotes the depth. The resulting model only has 0.8 million +parameters. +""" + +""" +## Model training and evaluation utility +""" + +# Code reference: +# https://keras.io/examples/vision/image_classification_with_vision_transformer/. + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +def run_experiment(model): + optimizer = tfa.optimizers.AdamW( + learning_rate=learning_rate, weight_decay=weight_decay + ) + + model.compile( + optimizer=optimizer, + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) + + checkpoint_filepath = "/tmp/checkpoint" + checkpoint_callback = keras.callbacks.ModelCheckpoint( + checkpoint_filepath, + monitor="val_accuracy", + save_best_only=True, + save_weights_only=True, + ) + + history = model.fit( + train_dataset, + validation_data=val_dataset, + epochs=num_epochs, + callbacks=[checkpoint_callback,TimeHistory(batch_size,args.log_steps)], + verbose=2 + ) + + model.load_weights(checkpoint_filepath) + # _, accuracy = model.evaluate(test_dataset) + # print(f"Test accuracy: {round(accuracy * 100, 2)}%") + + return history, model + + +""" +## Train and evaluate model +""" + +conv_mixer_model = get_conv_mixer_256_8() +history, conv_mixer_model = run_experiment(conv_mixer_model) + +""" +The gap in training and validation performance can be mitigated by using additional +regularization techniques. Nevertheless, being able to get to ~83% accuracy within 10 +epochs with 0.8 million parameters is a strong result. +""" + +""" +## Visualizing the internals of ConvMixer + +We can visualize the patch embeddings and the learned convolution filters. Recall +that each patch embedding and intermediate feature map have the same number of channels +(256 in this case). This will make our visualization utility easier to implement. 
+""" + +# Code reference: https://bit.ly/3awIRbP. + + +# def visualization_plot(weights, idx=1): +# # First, apply min-max normalization to the +# # given weights to avoid isotrophic scaling. +# p_min, p_max = weights.min(), weights.max() +# weights = (weights - p_min) / (p_max - p_min) + +# # Visualize all the filters. +# num_filters = 256 +# plt.figure(figsize=(8, 8)) + +# for i in range(num_filters): +# current_weight = weights[:, :, :, i] +# if current_weight.shape[-1] == 1: +# current_weight = current_weight.squeeze() +# ax = plt.subplot(16, 16, idx) +# ax.set_xticks([]) +# ax.set_yticks([]) +# plt.imshow(current_weight) +# idx += 1 + + +# We first visualize the learned patch embeddings. +# patch_embeddings = conv_mixer_model.layers[2].get_weights()[0] +# visualization_plot(patch_embeddings) + +""" +Even though we did not train the network to convergence, we can notice that different +patches show different patterns. Some share similarity with others while some are very +different. These visualizations are more salient with larger image sizes. + +Similarly, we can visualize the raw convolution kernels. This can help us understand +the patterns to which a given kernel is receptive. +""" + +# First, print the indices of the convolution layers that are not +# pointwise convolutions. +# for i, layer in enumerate(conv_mixer_model.layers): +# if isinstance(layer, layers.DepthwiseConv2D): +# if layer.get_config()["kernel_size"] == (5, 5): +# print(i, layer) + +# idx = 26 # Taking a kernel from the middle of the network. + +# kernel = conv_mixer_model.layers[idx].get_weights()[0] +# kernel = np.expand_dims(kernel.squeeze(), axis=2) +# visualization_plot(kernel) + +""" +We see that different filters in the kernel have different locality spans, and this pattern +is likely to evolve with more training. +""" + +""" +## Final notes + +There's been a recent trend on fusing convolutions with other data-agnostic operations +like self-attention. 
Following works are along this line of research: + +* ConViT ([d'Ascoli et al.](https://arxiv.org/abs/2103.10697)) +* CCT ([Hassani et al.](https://arxiv.org/abs/2104.05704)) +* CoAtNet ([Dai et al.](https://arxiv.org/abs/2106.04803)) +""" \ No newline at end of file -- Gitee From fdee78017c48de6776095e2050031f073bd31785 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:15:34 +0000 Subject: [PATCH 35/54] deeplabv3_plus_ID2503_for_TensorFlow2.X --- .../LICENSE | 284 +++++++++++ .../README.md | 186 +++++++ .../configs/ops_info.json | 7 + .../modelzoo_level.txt | 3 + .../requirements.txt | 8 + .../run_1p.sh | 1 + .../test/train_full_1p.sh | 167 +++++++ .../test/train_performance_1p.sh | 167 +++++++ .../train.py | 470 ++++++++++++++++++ 9 files changed, 1293 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/configs/ops_info.json create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/train.py diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..5e0138638
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/README.md
@@ -0,0 +1,186 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Semantic Segmentation**
+
+**Version: 1.1**
+
+**Modified: 2021.10.01**
+
+**Size: 324KB**
+
+**Framework: TensorFlow 2.6**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Benchmark**
+
+**Description: DeepLabV3+ semantic segmentation model training code based on the TensorFlow framework**
+
+
+<h2 id="概述.md">Overview</h2>
+
+- Trains a DeepLabV3+ model for multi-class semantic segmentation.
+
+- Reference paper:
+
+    skip
+
+- Reference implementation:
+
+    [https://github.com/keras-team/keras-io/blob/master/examples/vision/deeplabv3_plus.py](https://github.com/keras-team/keras-io/blob/master/examples/vision/deeplabv3_plus.py)
+
+- Implementation adapted for the Ascend AI Processor:
+
+    skip
+
+- To fetch the code at a specific commit_id via Git:
+
+    ```
+    git clone {repository_url}        # clone the repository code
+    cd {repository_name}              # enter the model's repository directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # enter the model code path; skip this step if the repository contains only this model
+    ```
+
+## Default Configuration
+- Network architecture
+
+- Training hyperparameters (single card):
+    - Batch size: 4
+    - Train epochs: 25
+
+
+## Supported Features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed Precision Training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following a built-in optimization policy, it selectively lowers float32 operators across the network to float16, improving system performance and reducing memory usage with very little loss of precision.
+
+## Enabling Mixed Precision
+Sample code:
+
+```
+npu_device.global_options().precision_mode = args.precision_mode
+```
+
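+For context, this flag is consumed by the npu_config() routine in this sample's train.py. A minimal sketch of the full sequence (npu_device is the Ascend adapter for TensorFlow 2; the mixlist path shown matches configs/ops_info.json in this directory):
+
+```
+import npu_device
+
+# Let the built-in policy lower eligible float32 ops to float16.
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# Keep black-listed ops (e.g. ReduceSumD, listed in configs/ops_info.json)
+# in float32 via the mixlist file.
+npu_device.global_options().modify_mixlist = "./configs/ops_info.json"
+# Apply the options and make the NPU the default device.
+npu_device.open().as_default()
+```
+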
+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset Preparation
+
+1. Users must obtain the dataset themselves.
+
+## Model Training
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required by the program.
+
+       For the environment variable settings, see:
+
+       [Environment variable setup for the Ascend 910 training platform](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+    2. Single-card training
+
+       2.1 Set the single-card training parameters (the script is located at deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_full_1p.sh), for example:
+
+
+        ```
+        batch_size=4
+        # training epochs
+        train_epochs=25
+        ```
+
+       2.2 Single-card training command (the script is located at deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_full_1p.sh); how train.py maps this layout to tensors is sketched after this list.
+
+        ```
+        Run "export ASCEND_DEVICE_ID=0" (0-7) in the terminal to select the card used for single-card training
+        bash train_full_1p.sh --data_path=xx
+        The dataset should have the following structure (the data split may differ)
+        ├── resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
+        ├── ICCV15_fashion_dataset(ATR)
+        │   └── level_human_parsing
+        │       ├── JPEGImages
+        │       │   ├── 997_1.jpg
+        │       │   ├── ...
+        │       └── SegmentationClassAug
+        │           ├── 997_1.png
+        │           ├── ...
+        └── instance-level_human_parsing
+            └── instance-level_human_parsing
+                ├── Testing
+                │   ├── ...
+                ├── Training
+                │   ├── ...
+                ├── Validation
+                │   ├── ...
+                └── human_colormap.mat
+        ```
+
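+A minimal sketch of how this sample's train.py resolves the layout above: the --data_path value is forwarded to train.py as --data_dir, and images and masks are collected by glob under the Training split.
+
+```
+import os
+from glob import glob
+
+data_dir = "/path/to/dataset"  # hypothetical; the value passed via --data_path
+DATA_DIR = os.path.join(
+    data_dir, "instance-level_human_parsing/instance-level_human_parsing/Training")
+# The first 1000 image/mask pairs are used for training, the next 50 for validation.
+train_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[:1000]
+train_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[:1000]
+```
+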
+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+  1. Obtain the data.
+     See "Dataset Preparation" under "Quick Start".
+
+- Model training
+
+  See the "Quick Start" section.
+
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and Sample Code
+
+    ├── README.md                                 // documentation
+    ├── requirements.txt                          // dependencies
+    ├── train.py                                  // main script
+    ├── LICENSE
+    ├── run_1p.sh
+    ├── test
+    |    |—— train_full_1p.sh                     // single-card full-training script
+    |    |—— train_performance_1p.sh              // single-card performance script
+    ├── configs
+    |    |—— ops_info.json
+
+## Script Parameters
+
+```
+batch_size        training batch size
+train_epochs      total number of training epochs
+```
+
+## Training Process
+
+Launch single-card training with the command described under "Model Training".
+Set data_path in the training script (train_full_1p.sh) to the path of the training dataset. See the example under "Model Training" for the detailed procedure.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/configs/ops_info.json b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/configs/ops_info.json
new file mode 100644
index 000000000..8fcfa3d49
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/configs/ops_info.json
@@ -0,0 +1,7 @@
+{
+    "black-list":{
+        "to-add":[
+        "ReduceSumD"
+        ]
+    }
+}
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..bf6b86871
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..ea95d1a6a
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,8 @@
+os
+cv2
+glob
+scipy
+numpy
+argparse
+tensorflow
+matplotlib
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..ef6943627
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1 @@
+python3 train.py
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..98ab5a9ae
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,165 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no modification needed
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Basic parameters; review and adjust per model
+# Batch Size
+batch_size=4
+# Network name, same as the directory name
+Network="deeplabv3_plus_ID2503_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=25
+# Training steps
+#train_steps=50000
+# Learning rate
+learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+############ Debug/monitoring parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=True
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Debug/monitoring parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_full_1p.sh"
+   exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+cp $data_path/../resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 /root/.keras/models/
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --lr=${learning_rate} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and adjust per model
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print$NF}' | awk 'BEGIN {max=0} {if ($1+0 > max+0) max=$1} END {print max}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '250/250' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..01ce2b0f4
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,165 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no modification needed
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Basic parameters; review and adjust per model
+# Batch Size
+batch_size=4
+# Network name, same as the directory name
+Network="deeplabv3_plus_ID2503_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=5
+# Training steps
+#train_steps=50000
+# Learning rate
+learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+############ Debug/monitoring parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=True
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Debug/monitoring parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_performance_1p.sh"
+   exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+cp $data_path/../resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 /root/.keras/models/
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+python3 $cur_path/train.py --data_dir=${data_path} \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --lr=${learning_rate} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and adjust per model
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$NF}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep '250/250' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..6e22adc68 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/deeplabv3_plus_ID2503_for_TensorFlow2.X/train.py @@ -0,0 +1,470 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Title: Multiclass semantic segmentation using DeepLabV3+ +Author: [Soumik Rakshit](http://github.com/soumik12345) +Date created: 2021/08/31 +Last modified: 2021/09/1 +Description: Implement DeepLabV3+ architecture for Multi-class Semantic Segmentation. +""" +""" +## Introduction + +Semantic segmentation, with the goal to assign semantic labels to every pixel in an image, +is an essential computer vision task. In this example, we implement +the **DeepLabV3+** model for multi-class semantic segmentation, a fully-convolutional +architecture that performs well on semantic segmentation benchmarks. + +### References: + +- [Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf) +- [Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587) +- [DeepLab: Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs](https://arxiv.org/abs/1606.00915) +""" + +""" +## Downloading the data + +We will use the [Crowd Instance-level Human Parsing Dataset](https://arxiv.org/abs/1811.12596) +for training our model. The Crowd Instance-level Human Parsing (CIHP) dataset has 38,280 diverse human images. +Each image in CIHP is labeled with pixel-wise annotations for 20 categories, as well as instance-level identification. +This dataset can be used for the "human part segmentation" task. 
+""" +import npu_device +import time +import ast +import os +import cv2 +import argparse +import numpy as np +from glob import glob +from scipy.io import loadmat +# import matplotlib.pyplot as plt + +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers + +"""shell +gdown https://drive.google.com/uc?id=1B9A9UCJYMwTL4oBEo4RZfbMZMaZhKJaz +unzip -q instance-level-human-parsing.zip +""" +#===============================NPU Migration========================================= +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_dir', default="../", help="""directory to data""") + parser.add_argument('--batch_size', default=4, type=int, help="""batch size for 1p""") + parser.add_argument('--lr', default=0.001, type=float, help="""learning rate""") + parser.add_argument('--epochs', default=25, type=int, help="""epochs""") + parser.add_argument("--log_steps", default=50, type=int, help="TimeHis log Step.") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, help='autotune, default is False') + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +#加入判断后再设置为默认 + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + 
profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= + +print('npu_device loaded') +npu_config() + +num_epochs = args.epochs +lr = args.lr +""" +## Creating a TensorFlow Dataset + +Training on the entire CIHP dataset with 38,280 images takes a lot of time, hence we will be using +a smaller subset of 200 images for training our model in this example. +""" + +IMAGE_SIZE = 512 +BATCH_SIZE = args.batch_size +NUM_CLASSES = 20 +DATA_DIR = os.path.join(args.data_dir, 'instance-level_human_parsing/instance-level_human_parsing/Training') +NUM_TRAIN_IMAGES = 1000 +NUM_VAL_IMAGES = 50 + +train_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[:NUM_TRAIN_IMAGES] +train_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[:NUM_TRAIN_IMAGES] +val_images = sorted(glob(os.path.join(DATA_DIR, "Images/*")))[ + NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES +] +val_masks = sorted(glob(os.path.join(DATA_DIR, "Category_ids/*")))[ + NUM_TRAIN_IMAGES : NUM_VAL_IMAGES + NUM_TRAIN_IMAGES +] + + +def read_image(image_path, mask=False): + image = tf.io.read_file(image_path) + if mask: + image = tf.image.decode_png(image, channels=1) + image.set_shape([None, None, 1]) + image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE]) + else: + image = tf.image.decode_png(image, channels=3) + image.set_shape([None, None, 3]) + image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE]) + image = image / 127.5 - 1 + return image + + +def load_data(image_list, mask_list): + image = read_image(image_list) + mask = read_image(mask_list, mask=True) + return image, mask + + +def data_generator(image_list, mask_list): + dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list)) + dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE) + dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) + return dataset + + +train_dataset = data_generator(train_images, train_masks) +val_dataset = data_generator(val_images, val_masks) + +print("Train Dataset:", train_dataset) +print("Val Dataset:", val_dataset) + +""" +## Building the DeepLabV3+ model + +DeepLabv3+ extends DeepLabv3 by adding an encoder-decoder structure. The encoder module +processes multiscale contextual information by applying dilated convolution at multiple +scales, while the decoder module refines the segmentation results along object boundaries. + +![](https://github.com/lattice-ai/DeepLabV3-Plus/raw/master/assets/deeplabv3_plus_diagram.png) + +**Dilated convolution:** With dilated convolution, as we go deeper in the network, we can keep the +stride constant but with larger field-of-view without increasing the number of parameters +or the amount of computation. Besides, it enables larger output feature maps, which is +useful for semantic segmentation. 
+ +The reason for using **Dilated Spatial Pyramid Pooling** is that it was shown that as the +sampling rate becomes larger, the number of valid filter weights (i.e., weights that +are applied to the valid feature region, instead of padded zeros) becomes smaller. +""" + + +def convolution_block( + block_input, + num_filters=256, + kernel_size=3, + dilation_rate=1, + padding="same", + use_bias=False, +): + x = layers.Conv2D( + num_filters, + kernel_size=kernel_size, + dilation_rate=dilation_rate, + padding="same", + use_bias=use_bias, + kernel_initializer=keras.initializers.HeNormal(), + )(block_input) + x = layers.BatchNormalization()(x) + return tf.nn.relu(x) + + +def DilatedSpatialPyramidPooling(dspp_input): + dims = dspp_input.shape + x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input) + x = convolution_block(x, kernel_size=1, use_bias=True) + out_pool = layers.UpSampling2D( + size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]), interpolation="bilinear", + )(x) + + out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1) + out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6) + out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12) + out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18) + + x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18]) + output = convolution_block(x, kernel_size=1) + return output + + +""" +The encoder features are first bilinearly upsampled by a factor 4, and then +concatenated with the corresponding low-level features from the network backbone that +have the same spatial resolution. For this example, we +use a ResNet50 pretrained on ImageNet as the backbone model, and we use +the low-level features from the `conv4_block6_2_relu` block of the backbone. +""" + + +def DeeplabV3Plus(image_size, num_classes): + model_input = keras.Input(shape=(image_size, image_size, 3)) + resnet50 = keras.applications.ResNet50( + weights="imagenet", include_top=False, input_tensor=model_input + ) + x = resnet50.get_layer("conv4_block6_2_relu").output + x = DilatedSpatialPyramidPooling(x) + + input_a = layers.UpSampling2D( + size=(image_size // 4 // x.shape[1], image_size // 4 // x.shape[2]), + interpolation="bilinear", + )(x) + input_b = resnet50.get_layer("conv2_block3_2_relu").output + input_b = convolution_block(input_b, num_filters=48, kernel_size=1) + + x = layers.Concatenate(axis=-1)([input_a, input_b]) + x = convolution_block(x) + x = convolution_block(x) + x = layers.UpSampling2D( + size=(image_size // x.shape[1], image_size // x.shape[2]), + interpolation="bilinear", + )(x) + model_output = layers.Conv2D(num_classes, kernel_size=(1, 1), padding="same")(x) + return keras.Model(inputs=model_input, outputs=model_output) + + +model = DeeplabV3Plus(image_size=IMAGE_SIZE, num_classes=NUM_CLASSES) +# model.summary() + +""" +## Training + +We train the model using sparse categorical crossentropy as the loss function, and +Adam as the optimizer. 
+""" + +loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) +optimizer=keras.optimizers.Adam(learning_rate=lr) +optimizer = npu_device.train.optimizer.NpuLossScaleOptimizer(optimizer) +model.compile( + optimizer=optimizer, + loss=loss, + metrics=["accuracy"], +) + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, optimizer, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + print( + 'Train Step: %d/%d / loss_scale = %s / not_overdump_status = %s' % ( + self.last_log_step, self.global_steps, self.opt.loss_scale.numpy(), self.opt.last_step_finite.numpy()), flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +history = model.fit(train_dataset, validation_data=val_dataset, epochs=num_epochs, callbacks=[TimeHistory(args.batch_size,args.log_steps,optimizer)], verbose=2) + +# plt.plot(history.history["loss"]) +# plt.title("Training Loss") +# plt.ylabel("loss") +# plt.xlabel("epoch") +# plt.show() + +# plt.plot(history.history["accuracy"]) +# plt.title("Training Accuracy") +# plt.ylabel("accuracy") +# plt.xlabel("epoch") +# plt.show() + +# plt.plot(history.history["val_loss"]) +# plt.title("Validation Loss") +# plt.ylabel("val_loss") +# plt.xlabel("epoch") +# plt.show() + +# plt.plot(history.history["val_accuracy"]) +# plt.title("Validation Accuracy") +# plt.ylabel("val_accuracy") +# plt.xlabel("epoch") +# plt.show() + +""" +## Inference using Colormap Overlay + +The raw predictions from the model represent a one-hot encoded tensor of shape `(N, 512, 512, 20)` +where each one of the 20 channels is a binary mask corresponding to a predicted label. +In order to visualize the results, we plot them as RGB segmentation masks where each pixel +is represented by a unique color corresponding to the particular label predicted. We can easily +find the color corresponding to each label from the `human_colormap.mat` file provided as part +of the dataset. We would also plot an overlay of the RGB segmentation mask on the input image as +this further helps us to identify the different categories present in the image more intuitively. 
+""" + +# Loading the Colormap +# colormap = loadmat( +# "./instance-level_human_parsing/instance-level_human_parsing/human_colormap.mat" +# )["colormap"] +# colormap = colormap * 100 +# colormap = colormap.astype(np.uint8) + + +# def infer(model, image_tensor): +# predictions = model.predict(np.expand_dims((image_tensor), axis=0)) +# predictions = np.squeeze(predictions) +# predictions = np.argmax(predictions, axis=2) +# return predictions + + +# def decode_segmentation_masks(mask, colormap, n_classes): +# r = np.zeros_like(mask).astype(np.uint8) +# g = np.zeros_like(mask).astype(np.uint8) +# b = np.zeros_like(mask).astype(np.uint8) +# for l in range(0, n_classes): +# idx = mask == l +# r[idx] = colormap[l, 0] +# g[idx] = colormap[l, 1] +# b[idx] = colormap[l, 2] +# rgb = np.stack([r, g, b], axis=2) +# return rgb + + +# def get_overlay(image, colored_mask): +# image = tf.keras.preprocessing.image.array_to_img(image) +# image = np.array(image).astype(np.uint8) +# overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0) +# return overlay + + +# def plot_samples_matplotlib(display_list, figsize=(5, 3)): +# _, axes = plt.subplots(nrows=1, ncols=len(display_list), figsize=figsize) +# for i in range(len(display_list)): +# if display_list[i].shape[-1] == 3: +# axes[i].imshow(tf.keras.preprocessing.image.array_to_img(display_list[i])) +# else: +# axes[i].imshow(display_list[i]) +# plt.show() + + +# def plot_predictions(images_list, colormap, model): +# for image_file in images_list: +# image_tensor = read_image(image_file) +# prediction_mask = infer(image_tensor=image_tensor, model=model) +# prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20) +# overlay = get_overlay(image_tensor, prediction_colormap) +# plot_samples_matplotlib( +# [image_tensor, overlay, prediction_colormap], figsize=(18, 14) +# ) + + +""" +### Inference on Train Images +""" + +# plot_predictions(train_images[:4], colormap, model=model) + +# """ +# ### Inference on Validation Images +# """ + +# plot_predictions(val_images[:4], colormap, model=model) \ No newline at end of file -- Gitee From 5ba3c81274c3bb17725cba55bb4f0ceccdd6b4ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:16:25 +0000 Subject: [PATCH 36/54] =?UTF-8?q?DIN=5FID2641=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../DIN_ID2641_for_TensorFlow2.X/LICENSE | 21 ++ .../DIN_ID2641_for_TensorFlow2.X/README.md | 179 ++++++++++++ .../DIN_ID2641_for_TensorFlow2.X/__init__.py | 36 +++ .../DIN_ID2641_for_TensorFlow2.X/model.py | 153 +++++++++++ .../modelzoo_level.txt | 3 + .../DIN_ID2641_for_TensorFlow2.X/modules.py | 94 +++++++ .../npu_convert_dropout.py | 54 ++++ .../DIN_ID2641_for_TensorFlow2.X/npu_ops.py | 256 ++++++++++++++++++ .../preprocess/1_convert_pd.py | 72 +++++ .../preprocess/2_remap_id.py | 116 ++++++++ .../preprocess/__init__.py | 29 ++ .../requirements.txt | 0 .../DIN_ID2641_for_TensorFlow2.X/run_1p.sh | 3 + .../test/train_full_1p.sh | 187 +++++++++++++ .../test/train_performance_1p.sh | 187 +++++++++++++ .../test/train_performance_1p_static.sh | 189 +++++++++++++ .../DIN_ID2641_for_TensorFlow2.X/train.py | 246 +++++++++++++++++ .../DIN_ID2641_for_TensorFlow2.X/utils.py | 148 ++++++++++ 18 files changed, 1973 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/LICENSE create mode 
100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/README.md
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/__init__.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/model.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modelzoo_level.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modules.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_convert_dropout.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_ops.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/1_convert_pd.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/2_remap_id.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/__init__.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/requirements.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/run_1p.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_full_1p.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p_static.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/train.py
 create mode 100644 TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/utils.py

diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..51d555a15
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Ke YU
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..1fc2188ed
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/README.md
@@ -0,0 +1,179 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Recommendation**
+
+**Version: 1.1**
+
+**Modified: 2021.10.01**
+
+**Size: 324KB**
+
+**Framework: TensorFlow 2.4.1**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Benchmark**
+
+**Description: Recommendation network training code based on the TensorFlow framework**
+
+
+<h2 id="概述.md">Overview</h2>
+
+- The open-source project Recommender System with TF2.0 reproduces classic recommendation-algorithm papers, covering both Matching/recall (MF, BPR, SASRec, etc.) and Ranking (DeepFM, DCN, etc.).
+
+- Reference paper:
+
+    [https://arxiv.org/pdf/1706.06978.pdf](https://arxiv.org/pdf/1706.06978.pdf)
+
+- Reference implementation:
+
+    [https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/DIN](https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/DIN)
+
+- Implementation adapted for the Ascend AI Processor:
+
+    skip
+
+- To fetch the code at a specific commit_id via Git:
+
+    ```
+    git clone {repository_url}        # clone the repository code
+    cd {repository_name}              # enter the model's repository directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # enter the model code path; skip this step if the repository contains only this model
+    ```
+
+## Default Configuration
+- Network architecture
+
+- Training hyperparameters (single card):
+    - Batch size: 4096
+    - Train epochs: 5
+
+
+## Supported Features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed Precision Training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following a built-in optimization policy, it selectively lowers float32 operators across the network to float16, improving system performance and reducing memory usage with very little loss of precision.
+
+## Enabling Mixed Precision
+Sample code:
+
+```
+config_proto = tf.ConfigProto(allow_soft_placement=True)
+custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = 'NpuOptimizer'
+custom_op.parameter_map["use_off_line"].b = True
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+session_config = npu_config_proto(config_proto=config_proto)
+```
+
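+Note that the snippet above is the TF1.X-style session configuration. The TF2 samples in this repository (see e.g. the deeplabv3_plus sample's train.py) set the same option through the npu_device API; a minimal sketch:
+
+```
+import npu_device
+
+# Let the built-in policy lower eligible float32 ops to float16.
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# Apply the options and make the NPU the default device.
+npu_device.open().as_default()
+```
+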
+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset Preparation
+
+1. Users must obtain the dataset themselves.
+
+## Model Training
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required by the program.
+
+       For the environment variable settings, see:
+
+       [Environment variable setup for the Ascend 910 training platform](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+    2. Single-card training
+
+       2.1 Set the single-card training parameters (the script is located at DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p.sh), for example:
+
+
+        ```
+        batch_size=4096
+        # training epochs
+        train_epochs=5
+        ```
+
+       2.2 Single-card training command (the scripts are located under DIN_ID2641_for_TensorFlow2.X/test); a sketch of how the files below can be inspected follows this list.
+
+        ```
+        Run "export ASCEND_DEVICE_ID=0" (0-7) in the terminal to select the card used for single-card training
+        bash train_full_1p.sh --data_path=xx
+        The dataset should have the following structure (the data split may differ)
+        |
+        ├─meta.pkl
+        ├─remap.pkl
+        ├─reviews.pkl
+
+        ```
+
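+The three files are produced by the preprocess/1_convert_pd.py and preprocess/2_remap_id.py scripts shipped with this sample. A hedged sketch for inspecting them, assuming plain pickle serialization as in the upstream Recommender-System-with-TF2.0 preprocessing:
+
+```
+import pickle
+
+# "remap.pkl" is a hypothetical local path; adjust to where --data_path points.
+with open("remap.pkl", "rb") as f:
+    obj = pickle.load(f)
+print(type(obj))  # the exact structure depends on the preprocessing step
+```
+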
+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+  1. Obtain the data.
+     See "Dataset Preparation" under "Quick Start".
+
+- Model training
+
+  See the "Quick Start" section.
+
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and Sample Code
+
+    ├── README.md                                 // documentation
+    ├── requirements.txt                          // dependencies
+    ├── train.py                                  // main script
+    ├── utils.py
+    ├── model.py
+    ├── modules.py
+    ├── test
+    |    |—— train_full_1p.sh                     // single-card full-training script
+    |    |—— train_performance_1p.sh              // single-card performance script
+
+## Script Parameters
+
+```
+batch_size        training batch size
+train_epochs      total number of training epochs
+Configure default flag values for the remaining parameters in utils.py
+```
+
+## Training Process
+
+Launch single-card training with the command described under "Model Training".
+Set data_path in the training script (train_full_1p.sh) to the path of the training dataset. See the example under "Model Training" for the detailed procedure.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/__init__.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/__init__.py
new file mode 100644
index 000000000..4b6fedbd5
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/__init__.py
@@ -0,0 +1,36 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Created on May 23, 2020
+
+model: Deep interest network for click-through rate prediction
+
+@author: Ziyao Geng
+"""
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/model.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/model.py
new file mode 100644
index 000000000..57815e063
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/model.py
@@ -0,0 +1,153 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on May 23, 2020 + +model: Deep interest network for click-through rate prediction + +@author: Ziyao Geng +""" +import tensorflow as tf + +from tensorflow.keras import Model +from tensorflow.keras.layers import Embedding, Dense, BatchNormalization, Input, PReLU, Dropout +from tensorflow.keras.regularizers import l2 + +from modules import * + + +class DIN(Model): + def __init__(self, feature_columns, behavior_feature_list, att_hidden_units=(80, 40), + ffn_hidden_units=(80, 40), att_activation='prelu', ffn_activation='prelu', maxlen=40, dnn_dropout=0., embed_reg=1e-4): + """ + DIN + :param feature_columns: A list. dense_feature_columns + sparse_feature_columns + :param behavior_feature_list: A list. the list of behavior feature names + :param att_hidden_units: A tuple or list. Attention hidden units. + :param ffn_hidden_units: A tuple or list. Hidden units list of FFN. + :param att_activation: A String. The activation of attention. + :param ffn_activation: A String. Prelu or Dice. + :param maxlen: A scalar. Maximum sequence length. + :param dropout: A scalar. The number of Dropout. + :param embed_reg: A scalar. The regularizer of embedding. + """ + super(DIN, self).__init__() + self.maxlen = maxlen + + self.dense_feature_columns, self.sparse_feature_columns = feature_columns + + # len + self.other_sparse_len = len(self.sparse_feature_columns) - len(behavior_feature_list) + self.dense_len = len(self.dense_feature_columns) + self.behavior_num = len(behavior_feature_list) + + # other embedding layers + self.embed_sparse_layers = [Embedding(input_dim=feat['feat_num'], + input_length=1, + output_dim=feat['embed_dim'], + embeddings_initializer='random_uniform', + embeddings_regularizer=l2(embed_reg)) + for feat in self.sparse_feature_columns + if feat['feat'] not in behavior_feature_list] + # behavior embedding layers, item id and category id + self.embed_seq_layers = [Embedding(input_dim=feat['feat_num'], + input_length=1, + output_dim=feat['embed_dim'], + embeddings_initializer='random_uniform', + embeddings_regularizer=l2(embed_reg)) + for feat in self.sparse_feature_columns + if feat['feat'] in behavior_feature_list] + + # attention layer + self.attention_layer = Attention_Layer(att_hidden_units, att_activation) + + self.bn = BatchNormalization(trainable=True) + # ffn + self.ffn = [Dense(unit, activation=PReLU() if ffn_activation == 'prelu' else Dice())\ + for unit in ffn_hidden_units] + self.dropout = Dropout(dnn_dropout) + self.dense_final = Dense(1) + + def call(self, inputs): + # dense_inputs and sparse_inputs is empty + # seq_inputs (None, maxlen, behavior_num) + # item_inputs (None, behavior_num) + dense_inputs, sparse_inputs, seq_inputs, item_inputs = inputs + # attention ---> mask, if the element of seq_inputs is equal 0, it must be filled in. 
+ mask = tf.cast(tf.not_equal(seq_inputs[:, :, 0], 0), dtype=tf.float32) # (None, maxlen) + # other + other_info = dense_inputs + for i in range(self.other_sparse_len): + other_info = tf.concat([other_info, self.embed_sparse_layers[i](sparse_inputs[:, i])], axis=-1) + + # seq, item embedding and category embedding should concatenate + seq_embed = tf.concat([self.embed_seq_layers[i](seq_inputs[:, :, i]) for i in range(self.behavior_num)], axis=-1) + item_embed = tf.concat([self.embed_seq_layers[i](item_inputs[:, i]) for i in range(self.behavior_num)], axis=-1) + + # att + user_info = self.attention_layer([item_embed, seq_embed, seq_embed, mask]) # (None, d * 2) + + # concat user_info(att hist), cadidate item embedding, other features + if self.dense_len > 0 or self.other_sparse_len > 0: + info_all = tf.concat([user_info, item_embed, other_info], axis=-1) + else: + info_all = tf.concat([user_info, item_embed], axis=-1) + + info_all = self.bn(info_all) + + # ffn + for dense in self.ffn: + info_all = dense(info_all) + + info_all = self.dropout(info_all) + outputs = tf.nn.sigmoid(self.dense_final(info_all)) + return outputs + + def summary(self): + dense_inputs = Input(shape=(self.dense_len, ), dtype=tf.float32) + sparse_inputs = Input(shape=(self.other_sparse_len, ), dtype=tf.int32) + seq_inputs = Input(shape=(self.maxlen, self.behavior_num), dtype=tf.int32) + item_inputs = Input(shape=(self.behavior_num, ), dtype=tf.int32) + tf.keras.Model(inputs=[dense_inputs, sparse_inputs, seq_inputs, item_inputs], + outputs=self.call([dense_inputs, sparse_inputs, seq_inputs, item_inputs])).summary() + + +def test_model(): + dense_features = [{'feat': 'a'}, {'feat': 'b'}] + sparse_features = [{'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}, + {'feat': 'cate_id', 'feat_num': 100, 'embed_dim': 8}, + {'feat': 'adv_id', 'feat_num': 100, 'embed_dim': 8}] + behavior_list = ['item_id', 'cate_id'] + features = [dense_features, sparse_features] + model = DIN(features, behavior_list) + model.summary() + + +# test_model() \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modules.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modules.py new file mode 100644 index 000000000..9ed3b07ca --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/modules.py @@ -0,0 +1,94 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on Oct 26, 2020 + +modules of DIN: attention mechanism + +@author: Ziyao Geng +""" + + +import tensorflow as tf +from tensorflow.keras.regularizers import l2 +from tensorflow.keras.layers import Layer, BatchNormalization, Dense + + +class Attention_Layer(Layer): + def __init__(self, att_hidden_units, activation='prelu'): + """ + """ + super(Attention_Layer, self).__init__() + self.att_dense = [Dense(unit, activation=activation) for unit in att_hidden_units] + self.att_final_dense = Dense(1) + + def call(self, inputs): + # query: candidate item (None, d * 2), d is the dimension of embedding + # key: hist items (None, seq_len, d * 2) + # value: hist items (None, seq_len, d * 2) + # mask: (None, seq_len) + q, k, v, mask = inputs + q = tf.tile(q, multiples=[1, k.shape[1]]) # (None, seq_len * d * 2) + q = tf.reshape(q, shape=[-1, k.shape[1], k.shape[2]]) # (None, seq_len, d * 2) + + # q, k, out product should concat + info = tf.concat([q, k, q - k, q * k], axis=-1) + + # dense + for dense in self.att_dense: + info = dense(info) + + outputs = self.att_final_dense(info) # (None, seq_len, 1) + outputs = tf.squeeze(outputs, axis=-1) # (None, seq_len) + + paddings = tf.ones_like(outputs) * (-2 ** 32 + 1) # (None, seq_len) + outputs = tf.where(tf.equal(mask, 0), paddings, outputs) # (None, seq_len) + + # softmax + outputs = tf.nn.softmax(logits=outputs) # (None, seq_len) + outputs = tf.expand_dims(outputs, axis=1) # None, 1, seq_len) + + outputs = tf.matmul(outputs, v) # (None, 1, d * 2) + outputs = tf.squeeze(outputs, axis=1) + + return outputs + + +class Dice(Layer): + def __init__(self): + super(Dice, self).__init__() + self.bn = BatchNormalization(center=False, scale=False) + self.alpha = self.add_weight(shape=(), dtype=tf.float32, name='alpha') + + def call(self, x): + x_normed = self.bn(x) + x_p = tf.sigmoid(x_normed) + + return self.alpha * (1.0 - x_p) * x + x_p * x \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_convert_dropout.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_convert_dropout.py new file mode 100644 index 000000000..95f8689ce --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_convert_dropout.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from keras import backend +from keras.utils import control_flow_util +from keras.layers.core import Dropout +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import nn +import npu_ops + +def dropout_call(self, inputs, training=None): + """Make Keras Dropout to execute NPU dropout""" + if training is None: + training = backend.learning_phase() + + def dropped_inputs(): + return npu_ops.dropout( + inputs, + noise_shape=self._get_noise_shape(inputs), + seed=self.seed, + keep_prob=1 - self.rate) + + output = control_flow_util.smart_cond(training, + dropped_inputs, + lambda : array_ops.identity(inputs)) + + return output + +Dropout.call = dropout_call diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_ops.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_ops.py new file mode 100644 index 000000000..fa6f8f211 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/npu_ops.py @@ -0,0 +1,256 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""Ops for collective operations implemented using hccl.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numbers +from tensorflow.python.ops import array_ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import ops +from tensorflow.python.eager import context + +from npu_device import gen_npu_ops + + +DEFAULT_GRAPH_SEED = 87654321 +_MAXINT32 = 2**31 - 1 +def LARSV2(input_weight, + input_grad, + weight_decay, + learning_rate, + hyperpara=0.001, + epsilon=0.00001, + use_clip=False, + name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.LARSV2() is not compatible with " + "eager execution.") + + return gen_npu_ops.lars_v2(input_weight=input_weight, + input_grad=input_grad, + weight_decay=weight_decay, + learning_rate=learning_rate, + hyperpara=hyperpara, + epsilon=epsilon, + use_clip=use_clip, + name=name) + + +def _truncate_seed(seed): + return seed % _MAXINT32 # Truncate to fit into 32-bit integer + +def get_seed(op_seed): + global_seed = ops.get_default_graph().seed + + if global_seed is not None: + if op_seed is None: + op_seed = ops.get_default_graph()._last_id + + seeds = _truncate_seed(global_seed), _truncate_seed(op_seed) + else: + if op_seed is not None: + seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed) + else: + seeds = None, None + # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would + # be unexpected since Python docs say nondeterminism is (None, None). + if seeds == (0, 0): + return (0, _MAXINT32) + return seeds + +def _get_noise_shape(x, noise_shape): + # If noise_shape is none return immediately. + if noise_shape is None: + return array_ops.shape(x) + + try: + # Best effort to figure out the intended shape. + # If not possible, let the op to handle it. + # In eager mode exception will show up. + noise_shape_ = tensor_shape.as_shape(noise_shape) + except (TypeError, ValueError): + return noise_shape + + if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims): + new_dims = [] + for i, dim in enumerate(x.shape.dims): + if noise_shape_.dims[i].value is None and dim.value is not None: + new_dims.append(dim.value) + else: + new_dims.append(noise_shape_.dims[i].value) + return tensor_shape.TensorShape(new_dims) + + return noise_shape + +def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): + """The gradient for `gelu`. + + Args: + x: A tensor with type is float. + keep_prob: A tensor, float, rate of every element reserved. + noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random + generated. + seed: Random seed. + name: Layer name. + + Returns: + A tensor. + """ + if context.executing_eagerly(): + raise RuntimeError("tf.dropout() is not compatible with " + "eager execution.") + x = ops.convert_to_tensor(x, name="x") + if not x.dtype.is_floating: + raise ValueError("x has to be a floating point tensor since it's going to" + " be scaled. Got a %s tensor instead." 
% x.dtype) + if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1: + raise ValueError("keep_prob must be a scalar tensor or a float in the " + "range (0, 1], got %g" % keep_prob) + if isinstance(keep_prob, float) and keep_prob == 1: + return x + seed, seed2 = get_seed(seed) + noise_shape = _get_noise_shape(x, noise_shape) + gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name) + result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name) + return result + +@ops.RegisterGradient("DropOutDoMask") +def _DropOutDoMaskGrad(op, grad): + result = gen_npu_ops.drop_out_do_mask(grad, op.inputs[1], op.inputs[2]) + return [result, None, None] + +def basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple, + activation, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.basic_lstm_cell() is not compatible with " + "eager execution.") + x = ops.convert_to_tensor(x, name="x") + h = ops.convert_to_tensor(h, name="h") + c = ops.convert_to_tensor(c, name="c") + w = ops.convert_to_tensor(w, name="w") + b = ops.convert_to_tensor(b, name="b") + result = gen_npu_ops.basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple, + activation, name) + return result + +@ops.RegisterGradient("BasicLSTMCell") +def basic_lstm_cell_grad(op, dct, dht, dit, djt, dft, dot, dtanhct): + + dgate, dct_1 = gen_npu_ops.basic_lstm_cell_c_state_grad(op.inputs[2], dht, dct, op.outputs[2], op.outputs[3], op.outputs[4], op.outputs[5], op.outputs[6], forget_bias=op.get_attr("forget_bias"), activation=op.get_attr("activation")) + dw, db = gen_npu_ops.basic_lstm_cell_weight_grad(op.inputs[0], op.inputs[1], dgate) + dxt, dht = gen_npu_ops.basic_lstm_cell_input_grad(dgate, op.inputs[3], keep_prob=op.get_attr("keep_prob")) + + return [dxt, dht, dct_1, dw, db] + +def adam_apply_one_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.adam_apply_one_assign() is not compatible with " + "eager execution.") + result = gen_npu_ops.adam_apply_one_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, add2_y,name) + return result + +def adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.adam_apply_one_with_decay_assign() is not compatible with " + "eager execution.") + result = gen_npu_ops.adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name) + return result + +@ops.RegisterGradient("DynamicGruV2") +def dynamic_gru_v2_grad(op, dy, doutput_h, dupdate, dreset, dnew, dhidden_new): + (x, weight_input, weight_hidden, bias_input, bias_hidden, seq_length, init_h) = op.inputs + (y, output_h, update, reset, new, hidden_new) = op.outputs + (dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev) = gen_npu_ops.dynamic_gru_v2_grad(x, weight_input, weight_hidden, y, init_h, output_h, dy, doutput_h, update, reset, new, hidden_new, direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), gate_order=op.get_attr("gate_order"), reset_after=op.get_attr("reset_after")) + + return (dx, dw_input, dw_hidden, db_input, db_hidden, seq_length, dh_prev) + 
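+# Note on the @ops.RegisterGradient hooks in this file: TensorFlow calls the
+# registered function with the forward op and the incoming gradient(s), and the
+# function maps them onto the matching fused NPU backward kernel, reading any
+# forward attributes it needs via op.get_attr(...). One gradient (or None) must
+# be returned per forward-op input. A minimal sketch of the pattern -- the op
+# and kernel names here are illustrative, not real registrations:
+#
+#   @ops.RegisterGradient("SomeFusedOp")
+#   def _some_fused_op_grad(op, grad):
+#       return [gen_npu_ops.some_fused_op_grad(grad, op.inputs[1]), None]
+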
+@ops.RegisterGradient("DynamicRnn") +def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): + (x, w, b, seq_length, init_h, init_c) = op.inputs + (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs + (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, cell_type=op.get_attr("cell_type"), direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), use_peephole=op.get_attr("use_peephole"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), forget_bias=op.get_attr("forget_bias")) + + return (dx, dw, db, seq_length, dh_prev, dc_prev) + +def lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x, + mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.lamb_apply_optimizer_assign() is not compatible with eager execution") + update,nextv,nextm=gen_npu_ops.lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x, + mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name) + return update,nextv,nextm + +def lamb_apply_weight_assign(input0,input1,input2,input3,input4,name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.lamb_apply_weight_assign() is not compatible with eager execution") + result = gen_npu_ops.lamb_apply_weight_assign(input0,input1,input2,input3,input4,name) + return result + +def dropout_v3(x, keep_prob, noise_shape=None, seed=None, name=None): + """ The gradient for gelu + + Args: + x: A tensor with type is float + keep_prob: A tensor, float, rate of every element reserved + noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random generated. + seed: Random seed. + name: Layer name. + + Returns: + A tensor. + """ + x = ops.convert_to_tensor(x,name="x") + if not x.dtype.is_floating: + raise ValueError("x has to be a floating point tensor since it's going to be scaled. Got a %s tensor instead." % x.dtype) + + if isinstance(keep_prob,numbers.Real) and not 0 < keep_prob <=1: + raise ValueError("Keep_prob must be a scalar tensor or a float in the range (0,1], got %g" % keep_prob) + + if isinstance(keep_prob,float) and keep_prob==1: + return x + + seed, seed2 = get_seed(seed) + noise_shape = _get_noise_shape(x,noise_shape) + gen_out = gen_npu_ops.drop_out_gen_mask_v3(noise_shape,keep_prob,seed,seed2,name) + result = gen_npu_ops.drop_out_do_mask_v3(x, gen_out, keep_prob, name) + return result + +@ops.RegisterGradient("DropOutDoMaskV3") +def _DropOutDoMaskV3Grad(op,grad): + result = gen_npu_ops.drop_out_do_mask_v3(grad, op.inputs[1], op.inputs[2]) + return [result, None, None] \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/1_convert_pd.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/1_convert_pd.py new file mode 100644 index 000000000..b5047be53 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/1_convert_pd.py @@ -0,0 +1,72 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on May 24, 2020 + +json ---> pd --->pkl +meta只保留reviews文件中出现过的商品 + +@author: Ziyao Geng +""" + +import pickle +import pandas as pd + + +def to_df(file_path): + """ + 转化为DataFrame结构 + :param file_path: 文件路径 + :return: + """ + with open(file_path, 'r') as fin: + df = {} + i = 0 + for line in fin: + df[i] = eval(line) + i += 1 + df = pd.DataFrame.from_dict(df, orient='index') + return df + + +reviews_df = to_df('reviews_Electronics_5.json') + +# 改变列的顺序 +# reviews2_df = pd.read_json('reviews_Electronics_5.json', lines=True) + + +with open('reviews.pkl', 'wb') as f: + pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL) + +meta_df = to_df('meta_Electronics.json') +meta_df = meta_df[meta_df['asin'].isin(reviews_df['asin'].unique())] +meta_df = meta_df.reset_index(drop=True) +with open('meta.pkl', 'wb') as f: + pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL) diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/2_remap_id.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/2_remap_id.py new file mode 100644 index 000000000..4fd621d1f --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/2_remap_id.py @@ -0,0 +1,116 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on May 24, 2020 + +reviews_df保留'reviewerID'【用户ID】, 'asin'【产品ID】, 'unixReviewTime'【浏览时间】三列 +meta_df保留'asin'【产品ID】, 'categories'【种类】两列 + +reviews.pkl: 1689188 * 9 +['reviewerID', 'asin', 'reviewerName', 'helpful', 'reviewText', + 'overall', 'summary', 'unixReviewTime', 'reviewTime'] + + reviewerID asin ... unixReviewTime reviewTime +0 AO94DHGC771SJ 0528881469 ... 1370131200 06 2, 2013 +1 AMO214LNFCEI4 0528881469 ... 1290643200 11 25, 2010 + +meta.pkl: 63001 * 9 +['asin', 'imUrl', 'description', 'categories', 'title', 'price', + 'salesRank', 'related', 'brand'] + asin categories +0 0528881469 [[Electronics, GPS & Navigation, Vehicle GPS, ... + +@author: Ziyao Geng +""" + +import random +import pickle +import numpy as np +import pandas as pd + +random.seed(2020) + + +def build_map(df, col_name): + """ + 制作一个映射,键为列名,值为序列数字 + :param df: reviews_df / meta_df + :param col_name: 列名 + :return: 字典,键 + """ + key = sorted(df[col_name].unique().tolist()) + m = dict(zip(key, range(len(key)))) + df[col_name] = df[col_name].map(lambda x: m[x]) + return m, key + + +# reviews +reviews_df = pd.read_pickle('reviews.pkl') +reviews_df = reviews_df[['reviewerID', 'asin', 'unixReviewTime']] + +# meta +meta_df = pd.read_pickle('meta.pkl') +meta_df = meta_df[['asin', 'categories']] +# 类别只保留最后一个 +meta_df['categories'] = meta_df['categories'].map(lambda x: x[-1][-1]) + +# meta_df文件的物品ID映射 +asin_map, asin_key = build_map(meta_df, 'asin') +# meta_df文件物品种类映射 +cate_map, cate_key = build_map(meta_df, 'categories') +# reviews_df文件的用户ID映射 +revi_map, revi_key = build_map(reviews_df, 'reviewerID') + +# user_count: 192403 item_count: 63001 cate_count: 801 example_count: 1689188 +user_count, item_count, cate_count, example_count = \ + len(revi_map), len(asin_map), len(cate_map), reviews_df.shape[0] +# print('user_count: %d\titem_count: %d\tcate_count: %d\texample_count: %d' % +# (user_count, item_count, cate_count, example_count)) + +# 按物品id排序,并重置索引 +meta_df = meta_df.sort_values('asin') +meta_df = meta_df.reset_index(drop=True) + +# reviews_df文件物品id进行映射,并按照用户id、浏览时间进行排序,重置索引 +reviews_df['asin'] = reviews_df['asin'].map(lambda x: asin_map[x]) +reviews_df = reviews_df.sort_values(['reviewerID', 'unixReviewTime']) +reviews_df = reviews_df.reset_index(drop=True) +reviews_df = reviews_df[['reviewerID', 'asin', 'unixReviewTime']] + +# 各个物品对应的类别 +cate_list = np.array(meta_df['categories'], dtype='int32') + +# 保存所需数据为pkl文件 +with open('remap.pkl', 'wb') as f: + pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL) + pickle.dump(cate_list, f, pickle.HIGHEST_PROTOCOL) + pickle.dump((user_count, item_count, cate_count, example_count), + f, pickle.HIGHEST_PROTOCOL) + pickle.dump((asin_key, cate_key, revi_key), f, pickle.HIGHEST_PROTOCOL) diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/__init__.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/__init__.py new file mode 100644 index 000000000..9772d6bd7 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/preprocess/__init__.py @@ -0,0 +1,29 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..bf5c6ac58
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1,3 @@
+cur_path=`pwd`
+python3 ${cur_path}/train.py --epochs=5 --data_path=. --batch_size=4096 --ckpt_save_path="" --precision_mode="" > loss+perf_gpu.txt 2>&1
+
diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..0b50cbd10
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,187 @@
+#!/bin/bash
+
+# Current path; no need to modify
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID
+# Collective-communication parameters; no need to modify
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=$ASCEND_DEVICE_ID
+
+# Dataset path; keep empty here, no need to modify
+data_path=""
+
+# Basic parameters; review and adjust for the model
+# Network name, identical to the directory name
+Network="DIN_ID2641_for_TensorFlow2.X"
+# Number of training epochs
+train_epochs=5
+# Training batch size
+batch_size=4096
+
+
+############ Debug/maintenance parameters ##############
+# precision_mode="allow_mix_precision"
+precision_mode="allow_fp32_to_fp16"
+# Fixed parameters; no need to modify the lines below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="${cur_path}/../configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="${cur_path}/../configs/fusion_switch.cfg"
+############ Debug/maintenance parameters ##############
+
+############ Debug/maintenance parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | grep auc | awk 'END{print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' 
$cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' | head -n ${train_epochs} >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..6a57fc842 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,187 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="DIN_ID2641_for_TensorFlow2.X" +#训练epoch +train_epochs=5 +#训练batch_size +batch_size=4096 + + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done 
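+# The loop above parses flags of the form --key=value; ${para#*=} strips
+# everything up to the first '=' to recover the value. Illustrative invocation
+# (only --data_path is required; the rest keep the defaults set above):
+#   bash test/train_performance_1p.sh --data_path=/path/to/dataset --precision_mode=allow_mix_precision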
+############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=`cat ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r'| egrep -Eo "loss: [0-9]*\.[0-9]* - auc: [0-9]*\.[0-9]*" | awk -F " " '{print $5}' | tail -n 1` +train_accuracy=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | grep auc | awk 'END{print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' | head -n ${train_epochs} >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize 
= ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..3deae31de --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,189 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="DIN_ID2641_for_TensorFlow2.X" +#训练epoch +train_epochs=3 + +#训练batch_size +batch_size=4096 + + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data 
dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --static=1 > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=`cat ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r'| egrep -Eo "loss: [0-9]*\.[0-9]* - auc: [0-9]*\.[0-9]*" | awk -F " " '{print $5}' | tail -n 1` +train_accuracy=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss | grep auc | awk 'END{print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' | head -n ${train_epochs} >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> 
$cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..a60c8a7b9 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/train.py @@ -0,0 +1,246 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +""" +Created on Oct 23, 2020 + +train DIN model + +@author: Ziyao Geng +""" + +import npu_device + +import tensorflow as tf +from time import time +from tensorflow.keras.losses import binary_crossentropy +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.callbacks import EarlyStopping +from tensorflow.keras.metrics import AUC + +from model import DIN +from utils import * + +import os +import ast +import time +import numpy as np +import argparse + +def parse_args(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default='', help="""directory to data""") + parser.add_argument('--ckpt_save_path', default='', help="""directory to ckpt""") + parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""") + parser.add_argument('--epochs', default=3, type=int, help="""epochs""") + parser.add_argument('--log_steps', default=1, type=int, help="""log frequency""") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--static', default=0, type=int, help="""static""") + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + 
"task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode=args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + npu_device.open().as_default() + +npu_config() + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +class LossHistory(tf.keras.callbacks.Callback): + def __init__(self, bs): + super().__init__() + self.batch_size = bs + def on_batch_begin(self, batch, logs={}): + self.start = time.time() + def on_batch_end(self, batch, logs={}): + if batch % args.log_steps == 0: + loss = logs.get('loss') + dura = time.time() - self.start + if dura < 10: + self.epoch_perf.append(dura) + print('step:%d ,loss: %f ,time:%f'%(batch, loss, dura), flush=True) + def on_epoch_begin(self, epoch, logs={}): + self.epoch_perf = [] + self.epochstart = time.time() + def on_epoch_end(self, epoch, logs={}): + duration = time.time() - self.epochstart + print('epoch_duration: ', duration) + if epoch != 0: + self.perf.append(np.mean(self.epoch_perf)) + def on_train_begin(self, logs={}): + print('params: ', self.params) + self.perf = [] + def on_train_end(self, logs={}): + print('imgs/s: %.2f'%(self.batch_size / np.mean(self.perf))) + +if __name__ == '__main__': + # ========================= Hyper Parameters ======================= + file = 'remap.pkl' + file = os.path.join(args.data_path, file) + maxlen = 20 + + embed_dim = 8 + att_hidden_units = [80, 40] + ffn_hidden_units = [256, 128, 64] + dnn_dropout = 0.5 + att_activation = 'sigmoid' + ffn_activation = 'prelu' + + learning_rate = 0.001 + batch_size = args.batch_size + epochs = args.epochs + # ========================== Create dataset ======================= + feature_columns, behavior_list, train, val, test = 
create_amazon_electronic_dataset(file, embed_dim, maxlen) + train_X, train_y = train + val_X, val_y = val + test_X, test_y = test + print('=========================train Parameters =======================') + if args.static==1: + train_X = [np.array(train_X[0][:2220032]),np.array(train_X[1][:2220032]), np.array(train_X[2][:2220032]), np.array(train_X[3][:2220032])] + train_y = np.array(train_y[:2220032]) + val_X = [np.array(val_X[0][:380928]),np.array(val_X[1][:380928]), np.array(val_X[2][:380928]), np.array(val_X[3][:380928])] + val_y = np.array(val_y[:380928]) + test_X = [np.array(test_X[0][:380928]),np.array(test_X[1][:380928]), np.array(test_X[2][:380928]), np.array(test_X[3][:380928])] + test_y = np.array(test_y[:380928]) + print('=========================test Parameters =======================') + + # ============================Build Model========================== + model = DIN(feature_columns, behavior_list, att_hidden_units, ffn_hidden_units, att_activation, + ffn_activation, maxlen, dnn_dropout) + model.summary() + #logger = LossHistory(batch_size) + logger = TimeHistory(batch_size,100) + # ============================model checkpoint====================== + # check_path = 'save/din_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt' + # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True, + # verbose=1, period=5) + # =========================Compile============================ + model.compile(loss=binary_crossentropy, optimizer=Adam(learning_rate=learning_rate), + metrics=[AUC()]) + # ===========================Fit============================== + model.fit( + train_X, + train_y, + epochs=epochs, + # callbacks=[EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)], # checkpoint + callbacks=logger, + validation_data=(val_X, val_y), + batch_size=batch_size, + verbose=2 + ) + save_ckpt = os.path.join(args.ckpt_save_path, "checkpoint/tf_model") + model.save_weights(filepath=save_ckpt, save_format="tf") + # ===========================Test============================== + print('test AUC: %f' % model.evaluate(test_X, test_y, batch_size=batch_size, verbose=2)[1]) diff --git a/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/utils.py new file mode 100644 index 000000000..bf8389206 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/DIN_ID2641_for_TensorFlow2.X/utils.py @@ -0,0 +1,148 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on May 25, 2020 + +create amazon electronic dataset + +@author: Ziyao Geng +""" +import pandas as pd +import numpy as np +import pickle +import random +from tqdm import tqdm +from tensorflow.keras.preprocessing.sequence import pad_sequences + + +def sparseFeature(feat, feat_num, embed_dim=4): + """ + create dictionary for sparse feature + :param feat: feature name + :param feat_num: the total number of sparse features that do not repeat + :param embed_dim: embedding dimension + :return: + """ + return {'feat': feat, 'feat_num': feat_num, 'embed_dim': embed_dim} + + +def denseFeature(feat): + """ + create dictionary for dense feature + :param feat: dense feature name + :return: + """ + return {'feat': feat} + + +def create_amazon_electronic_dataset(file, embed_dim=8, maxlen=40): + """ + :param file: dataset path + :param embed_dim: latent factor + :param maxlen: + :return: user_num, item_num, train_df, test_df + """ + print('==========Data Preprocess Start============') + with open(file, 'rb') as f: + reviews_df = pickle.load(f) + cate_list = pickle.load(f) + user_count, item_count, cate_count, example_count = pickle.load(f) + + reviews_df = reviews_df + reviews_df.columns = ['user_id', 'item_id', 'time'] + + train_data, val_data, test_data = [], [], [] + + for user_id, hist in tqdm(reviews_df.groupby('user_id')): + pos_list = hist['item_id'].tolist() + + def gen_neg(): + neg = pos_list[0] + while neg in pos_list: + neg = random.randint(0, item_count - 1) + return neg + + neg_list = [gen_neg() for i in range(len(pos_list))] + hist = [] + for i in range(1, len(pos_list)): + hist.append([pos_list[i - 1], cate_list[pos_list[i-1]]]) + hist_i = hist.copy() + if i == len(pos_list) - 1: + test_data.append([hist_i, [pos_list[i], cate_list[pos_list[i]]], 1]) + test_data.append([hist_i, [neg_list[i], cate_list[neg_list[i]]], 0]) + # test_data.append([hist_i, [pos_list[i]], 1]) + # test_data.append([hist_i, [neg_list[i]], 0]) + elif i == len(pos_list) - 2: + val_data.append([hist_i, [pos_list[i], cate_list[pos_list[i]]], 1]) + val_data.append([hist_i, [neg_list[i], cate_list[neg_list[i]]], 0]) + # val_data.append([hist_i, [pos_list[i]], 1]) + # val_data.append([hist_i, [neg_list[i]], 0]) + else: + train_data.append([hist_i, [pos_list[i], cate_list[pos_list[i]]], 1]) + train_data.append([hist_i, [neg_list[i], cate_list[neg_list[i]]], 0]) + # train_data.append([hist_i, [pos_list[i]], 1]) + # train_data.append([hist_i, [neg_list[i]], 0]) + + # feature columns + feature_columns = [[], + [sparseFeature('item_id', item_count, embed_dim), + ]] # sparseFeature('cate_id', cate_count, embed_dim) + + # behavior + behavior_list = ['item_id'] # , 'cate_id' + + # shuffle + random.shuffle(train_data) + random.shuffle(val_data) + random.shuffle(test_data) + + # create dataframe + train = pd.DataFrame(train_data, columns=['hist', 'target_item', 'label']) + val = pd.DataFrame(val_data, columns=['hist', 'target_item', 'label']) + test = pd.DataFrame(test_data, columns=['hist', 'target_item', 'label']) + + # if no dense or sparse features, can fill with 0 + 
print('==================Padding===================') + train_X = [np.array([0.] * len(train)), np.array([0] * len(train)), + pad_sequences(train['hist'], maxlen=maxlen), + np.array(train['target_item'].tolist())] + train_y = train['label'].values + val_X = [np.array([0] * len(val)), np.array([0] * len(val)), + pad_sequences(val['hist'], maxlen=maxlen), + np.array(val['target_item'].tolist())] + val_y = val['label'].values + test_X = [np.array([0] * len(test)), np.array([0] * len(test)), + pad_sequences(test['hist'], maxlen=maxlen), + np.array(test['target_item'].tolist())] + test_y = test['label'].values + print('============Data Preprocess End=============') + return feature_columns, behavior_list, (train_X, train_y), (val_X, val_y), (test_X, test_y) + +# create_amazon_electronic_dataset('raw_data/remap.pkl') -- Gitee From d014914d0cd5c8d991dbe142b2c39f776be7b50b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:16:49 +0000 Subject: [PATCH 37/54] =?UTF-8?q?FFM=5FID2632=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../FFM_ID2632_for_TensorFlow2.X/LICENSE | 21 ++ .../FFM_ID2632_for_TensorFlow2.X/README.md | 206 +++++++++++++++++ .../FFM_ID2632_for_TensorFlow2.X/__init__.py | 37 +++ .../FFM_ID2632_for_TensorFlow2.X/criteo.py | 113 ++++++++++ .../FFM_ID2632_for_TensorFlow2.X/model.py | 69 ++++++ .../modelzoo_level.txt | 3 + .../FFM_ID2632_for_TensorFlow2.X/modules.py | 89 ++++++++ .../requirements.txt | 0 .../FFM_ID2632_for_TensorFlow2.X/run_1p.sh | 3 + .../test/train_full_1p.sh | 193 ++++++++++++++++ .../test/train_performance_1p.sh | 210 ++++++++++++++++++ .../test/train_performance_1p_static.sh | 190 ++++++++++++++++ .../FFM_ID2632_for_TensorFlow2.X/train.py | 198 +++++++++++++++++ .../FFM_ID2632_for_TensorFlow2.X/utils.py | 58 +++++ 14 files changed, 1390 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/__init__.py create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/criteo.py create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/model.py create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modules.py create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/train.py create mode 100644 TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/utils.py diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/LICENSE new file mode 100644 index 
000000000..51d555a15
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Ke YU
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..64b8cfab3
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/README.md
@@ -0,0 +1,206 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Preparation](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+

Basic Information

+
+**Publisher: Huawei**
+
+**Application Domain: Recommendation**
+
+**Version: 1.1**
+
+**Modified: 2022.4.11**
+
+**Size: 40KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code for Field-aware Factorization Machines (FFMs) for CTR prediction, based on the TensorFlow framework**
+
+

Overview

+
+## Summary
+
+Click-through rate (CTR) prediction plays an important role in computational advertising. Models based on degree-2 polynomial mappings and factorization machines (FMs) are widely used for this task. Recently, a variant of FMs, field-aware factorization machines (FFMs), has outperformed existing models in several worldwide CTR-prediction competitions.
+
+This sample is for testing only and has no practical use. Reference FFM library: https://github.com/ycjuan/libffm
+
+- Reference paper:
+
+  https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf
+
+- Reference implementation:
+
+  https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/FFM
+
+- Implementation adapted to the Ascend AI Processor:
+
+  skip
+
+- To fetch the code at a specific commit_id via Git:
+
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's repository directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # enter the model's code path; skip this step if the repository contains only this model
+
+
+## Default Configuration
+
+- Network structure:
+  - 3 layers
+  - 507521520 total params
+
+- Training hyperparameters (single device):
+  - Batch size: 512
+  - Train epochs: 5
+  - Learning rate: 0.001
+  - Read part: True
+  - Sample num: 1000000
+  - Test size: 0.2
+  - K: 10
+
+
+## Supported Features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed Precision Training
+
+The Ascend 910 AI Processor provides automatic mixed precision. Following a built-in optimization strategy, it automatically lowers selected float32 operators to float16, which improves system performance and reduces memory usage with very little loss of precision.
+
+## Enabling Mixed Precision
+
+In the launch script, pass --precision_mode='allow_mix_precision'.
+
+```
+ ./train_performance_1p_16bs.sh --help
+
+parameter explain:
+    --precision_mode       precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump            if or not over detection, default is False
+    --data_dump_flag       data dump flag, default is False
+    --data_dump_step       data dump step, default is 10
+    --profiling            if or not profiling for performance debug, default is False
+    --data_path            source data of training
+    -h/--help              show help message
+```
+
+Related code example:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+                    help='allow_fp32_to_fp16/force_fp16/ '
+                         'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
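+
+As a minimal sketch (not part of the delivered scripts; the option values follow this repository's train.py, and enable_mixed_precision is a hypothetical helper name), the precision mode and an optional operator mix list can be applied before opening the NPU device:
+
+```
+# Minimal sketch, assuming the npu_device package and the flag values
+# used by train.py in this repository.
+import npu_device
+
+def enable_mixed_precision(precision_mode="allow_mix_precision",
+                           use_mixlist=False, mixlist_file="ops_info.json"):
+    npu_device.global_options().precision_mode = precision_mode
+    # The mix list only takes effect under allow_mix_precision; it adjusts
+    # which operators the automatic mixed-precision pass may convert.
+    if use_mixlist and precision_mode == "allow_mix_precision":
+        npu_device.global_options().modify_mixlist = mixlist_file
+    npu_device.open().as_default()
+
+enable_mixed_precision()
+```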

Training Environment Preparation

+
+- For hardware and runtime environment preparation, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the root directory of the model.
+

Quick Start

+
+## Dataset Preparation
+
+1. Prepare the dataset yourself; this network uses the Criteo dataset.
+
+A reference layout for the dataset directory:
+
+```
+├──Criteo
+│   ├──demo.txt
+│   ├──.DS_Store
+│   ├──train.txt
+```
+
+
+
+## Model Training
+
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required by the program.
+
+       For the environment variable settings, see:
+
+       [Environment variable setup for the Ascend 910 training platform](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-device training
+
+       2.1 Single-device training command (the script is FFM_ID2632_for_TensorFlow2.X/test/train_full_1p.sh). First cd into the test directory, then launch training with the command below. Make sure "--data_path" in the example is changed to your own data path; here the data folder is placed under the home directory.
+
+           bash train_full_1p.sh --data_path=/home
+
+
+
+
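+
+Before a full run, the Criteo preprocessing can be exercised on its own. The sketch below is illustrative rather than part of the delivered scripts: create_criteo_dataset comes from this repository's criteo.py, while the file path and sample_num are assumptions to adapt to your setup.
+
+```
+# Minimal sketch: run only the Criteo preprocessing from criteo.py.
+import os
+from criteo import create_criteo_dataset
+
+file = os.path.join("/home", "Criteo", "train.txt")   # illustrative path
+feature_columns, (train_X, train_y), (test_X, test_y) = create_criteo_dataset(
+    file=file,
+    read_part=True,        # read only the first sample_num rows
+    sample_num=100000,
+    test_size=0.2,
+    static=0,              # 0 keeps the natural train/test split sizes
+)
+print(len(feature_columns), train_X.shape, test_X.shape)
+```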

Advanced Reference

+
+## Scripts and Sample Code
+
+```
+|--LICENSE
+|--README.md                                 #documentation
+|--criteo.py
+|--model.py
+|--modules.py
+|--train.py                                  #training code
+|--requirements.txt                          #required dependencies
+|--run_1p.sh
+|--utils.py
+|--test                                      #training script directory
+|    |--train_full_1p.sh                     #full training script
+|    |--train_performance_1p.sh              #performance training script
+|    |--train_performance_1p_static.sh       #performance training script (static shapes)
+```
+
+## Script Parameters
+
+```
+--data_path              # the path to train data
+--epochs                 # epochs of training
+--ckpt_save_path         # directory to ckpt
+--batch_size             # batch size for 1p
+--log_steps              # log frequency
+--sample_num             # sample num
+--precision_mode         # precision mode, default is allow_mix_precision
+--over_dump              # whether to enable overflow detection, default is False
+--data_dump_flag         # data dump flag, default is False
+--data_dump_step         # data dump step, default is 10
+--profiling              # whether to enable profiling for performance debugging, default is False
+--profiling_dump_path    # the path to save profiling data
+--over_dump_path         # the path to save overflow dump data
+--data_dump_path         # the path to save dump data
+--use_mixlist            # use_mixlist flag, default is False
+--fusion_off_flag        # fusion_off flag, default is False
+--mixlist_file           # mixlist file name, default is ops_info.json
+--fusion_off_file        # fusion_off file name, default is fusion_switch.cfg
+```
+
+## Training Process
+
+Launch single-device or multi-device training with the commands in "Model Training". Single-device and multi-device training use different scripts; single-device and 8-device network training are supported. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training logs and checkpoint files. Taking 8-device training as an example, the loss information is in the file ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/__init__.py b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/__init__.py
new file mode 100644
index 000000000..c1ea647fb
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/__init__.py
@@ -0,0 +1,37 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Created on May 23, 2020
+
+model: Field-aware Factorization Machines for CTR Prediction
+
+@author: Ziyao Geng
+"""
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/criteo.py b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/criteo.py
new file mode 100644
index 000000000..12ca0459c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/criteo.py
@@ -0,0 +1,113 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Created on July 13, 2020
+
+dataset: criteo dataset sample
+features:
+- Label - Target variable that indicates if an ad was clicked (1) or not (0).
+- I1-I13 - A total of 13 columns of integer features (mostly count features).
+- C1-C26 - A total of 26 columns of categorical features.
+The values of these features have been hashed onto 32 bits for anonymization purposes.
+ +@author: Ziyao Geng(zggzy1996@163.com) +""" + +import pandas as pd +import numpy as np + +from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer +from sklearn.model_selection import train_test_split + +from utils import sparseFeature + + +def create_criteo_dataset(file, embed_dim=8, read_part=True, sample_num=100000, test_size=0.2,static=1): + """ + a example about creating criteo dataset + :param file: dataset's path + :param embed_dim: the embedding dimension of sparse features + :param read_part: whether to read part of it + :param sample_num: the number of instances if read_part is True + :param test_size: ratio of test dataset + :return: feature columns, train, test + """ + names = ['label', 'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', + 'I12', 'I13', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', + 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', + 'C23', 'C24', 'C25', 'C26'] + + if read_part: + data_df = pd.read_csv(file, sep='\t', iterator=True, header=None, + names=names) + data_df = data_df.get_chunk(sample_num) + + else: + data_df = pd.read_csv(file, sep='\t', header=None, names=names) + + sparse_features = ['C' + str(i) for i in range(1, 27)] + dense_features = ['I' + str(i) for i in range(1, 14)] + features = sparse_features + dense_features + + data_df[sparse_features] = data_df[sparse_features].fillna('-1') + data_df[dense_features] = data_df[dense_features].fillna(0) + + # Bin continuous data into intervals. + est = KBinsDiscretizer(n_bins=100, encode='ordinal', strategy='uniform') + data_df[dense_features] = est.fit_transform(data_df[dense_features]) + + for feat in sparse_features: + le = LabelEncoder() + data_df[feat] = le.fit_transform(data_df[feat]) + + # ==============Feature Engineering=================== + + # ==================================================== + feature_columns = [sparseFeature(feat, int(data_df[feat].max()) + 1, embed_dim=embed_dim) + for feat in features] + train, test = train_test_split(data_df, test_size=test_size) + if static==1: + print('=====================[DEBUG]======================',flush=True) + train_X = train[features].values[:77824].astype('int32') + train_y = train['label'].values[:77824].astype('int32') + print("train_X.shape",train_X.shape,flush=True) + print("train_y.shape",train_y.shape,flush=True) + test_X = test[features].values[:8192].astype('int32') + test_y = test['label'].values[:8192].astype('int32') + print("test_X.shape",test_X.shape,flush=True) + print("test_y.shape",test_y.shape,flush=True) + else: + train_X = train[features].values.astype('int32') + train_y = train['label'].values.astype('int32') + test_X = test[features].values.astype('int32') + test_y = test['label'].values.astype('int32') + + return feature_columns, (train_X, train_y), (test_X, test_y) \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/model.py b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/model.py new file mode 100644 index 000000000..dcc40f4d8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/model.py @@ -0,0 +1,69 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on August 26, 2020 +Updated on May 19, 2021 + +model: Field-aware Factorization Machines for CTR Prediction + +@author: Ziyao Geng(zggzy1996@163.com) +""" + +import tensorflow as tf +from tensorflow.keras import Model +from tensorflow.keras.layers import Input, Layer +from tensorflow.keras.regularizers import l2 + +from modules import FFM_Layer + + +class FFM(Model): + def __init__(self, feature_columns, k, w_reg=1e-6, v_reg=1e-6): + """ + FFM architecture + :param feature_columns: A list. sparse column feature information. + :param k: the latent vector + :param w_reg: the regularization coefficient of parameter w + :param field_reg_reg: the regularization coefficient of parameter v + """ + super(FFM, self).__init__() + self.sparse_feature_columns = feature_columns + self.ffm = FFM_Layer(self.sparse_feature_columns, k, w_reg, v_reg) + + def call(self, inputs, **kwargs): + ffm_out = self.ffm(inputs) + outputs = tf.nn.sigmoid(ffm_out) + return outputs + + def summary(self, **kwargs): + sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32) + tf.keras.Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary() + + diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modules.py b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modules.py new file mode 100644 index 000000000..72b8ca1eb --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/modules.py @@ -0,0 +1,89 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +Created on May 19, 2021 + +modules of FFM + +@author: Ziyao Geng(zggzy1996@163.com) +""" + +import tensorflow as tf +from tensorflow.keras.layers import Layer +from tensorflow.keras.regularizers import l2 + + +class FFM_Layer(Layer): + def __init__(self, sparse_feature_columns, k, w_reg=1e-6, v_reg=1e-6): + """ + + :param dense_feature_columns: A list. sparse column feature information. + :param k: A scalar. The latent vector + :param w_reg: A scalar. The regularization coefficient of parameter w + :param v_reg: A scalar. The regularization coefficient of parameter v + """ + super(FFM_Layer, self).__init__() + self.sparse_feature_columns = sparse_feature_columns + self.k = k + self.w_reg = w_reg + self.v_reg = v_reg + self.index_mapping = [] + self.feature_length = 0 + for feat in self.sparse_feature_columns: + self.index_mapping.append(self.feature_length) + self.feature_length += feat['feat_num'] + self.field_num = len(self.sparse_feature_columns) + + def build(self, input_shape): + self.w0 = self.add_weight(name='w0', shape=(1,), + initializer=tf.zeros_initializer(), + trainable=True) + self.w = self.add_weight(name='w', shape=(self.feature_length, 1), + initializer='random_normal', + regularizer=l2(self.w_reg), + trainable=True) + self.v = self.add_weight(name='v', + shape=(self.feature_length, self.field_num, self.k), + initializer='random_normal', + regularizer=l2(self.v_reg), + trainable=True) + + def call(self, inputs, **kwargs): + inputs = inputs + tf.convert_to_tensor(self.index_mapping) + # first order + first_order = self.w0 + tf.reduce_sum(tf.nn.embedding_lookup(self.w, inputs), axis=1) # (batch_size, 1) + # field second order + second_order = 0 + latent_vector = tf.reduce_sum(tf.nn.embedding_lookup(self.v, inputs), axis=1) # (batch_size, field_num, k) + for i in range(self.field_num): + for j in range(i+1, self.field_num): + second_order += tf.reduce_sum(latent_vector[:, i] * latent_vector[:, j], axis=1, keepdims=True) + return first_order + second_order \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/run_1p.sh new file mode 100644 index 000000000..2a1601ec1 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/run_1p.sh @@ -0,0 +1,3 @@ +cur_path='pwd' +python3 ${cur_path}/train.py --epochs=5 --data_path=. 
--batch_size=4096 --sample_num=10000 --ckpt_save_path="" --precision_mode="" > loss+perf_gpu.txt 2>&1 + diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..cec5bd845 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,193 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 +export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="FFM_ID2632_for_TensorFlow2.X" +#训练epoch +train_epochs=10 +#训练batch_size +batch_size=4096 + +sample_num=1000000 + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + 
mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --log_steps=176 \ + --sample_num=$sample_num \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path}> ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1` +wait +FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=`cat ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r'| egrep -Eo "loss: [0-9]*\.[0-9]* - auc: [0-9]*\.[0-9]*" | awk -F " " '{print $5}' | tail -n 1` +train_accuracy=`cat ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | grep "test AUC" | awk '{print $3}'` + +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' | head -n ${train_epochs} >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 
000000000..fd4c90260 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,210 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 +export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="FFM_ID2632_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练batch_size +batch_size=4096 + +sample_num=10000 + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + ''' + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --sample_num=$sample_num 
\ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path}> ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + ''' + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --log_steps=176 \ + --sample_num=$sample_num \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path}> ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1` +wait +FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`cat ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r'| egrep -Eo "loss: [0-9]*\.[0-9]* - auc: [0-9]*\.[0-9]*" | awk -F " " '{print $5}' | tail -n 1` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' | head -n ${train_epochs} >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> 
$cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..5278fa20c --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,190 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 +export NPU_CALCULATE_DEVICE=$ASCEND_DEVICE_ID +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="FFM_ID2632_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练batch_size +batch_size=4096 + +sample_num=100000 +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="${cur_path}/../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="${cur_path}/../configs/fusion_switch.cfg" +############维测参数############## + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --log_steps* ]];then + log_steps=`echo ${para#*=}` + fi +done +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf 
${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 train.py --epochs=$train_epochs \ + --batch_size=$batch_size \ + --data_path=$data_path \ + --ckpt_save_path="" \ + --sample_num=$sample_num \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --static=1 > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ,time: $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $4}' | awk -F ':' '{print $2}' | tail -n 1` +wait +FPS=`grep imgs/s $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $2}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`cat ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r'| egrep -Eo "loss: [0-9]*\.[0-9]* - auc: [0-9]*\.[0-9]*" | awk -F " " '{print $5}' | tail -n 1` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep '\- loss:' $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $6}' | head -n ${train_epochs} >> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..02bd64a66 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/train.py @@ -0,0 
+1,198 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +""" +Created on August 26, 2020 + +train FFM model + +@author: Ziyao Geng +""" +import npu_device + +import tensorflow as tf +from tensorflow.keras.callbacks import EarlyStopping +from tensorflow.keras.losses import binary_crossentropy +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.metrics import AUC + +from model import FFM +from criteo import create_criteo_dataset + +import os +import ast +import time +import numpy as np +import argparse + +def parse_args(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default='', help="""directory to data""") + parser.add_argument('--ckpt_save_path', default='', help="""directory to ckpt""") + parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""") + parser.add_argument('--epochs', default=3, type=int, help="""epochs""") + parser.add_argument('--sample_num', default=5000000, type=int, help="""sample_num""") + parser.add_argument('--log_steps', default=1, type=int, help="""log frequency""") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', 
type=ast.literal_eval, help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--static', default=0, type=int, help="""static""") + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode=args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + npu_device.global_options().variable_memory_max_size=10*1024*1024*1024 + npu_device.global_options().graph_memory_max_size=str("21*1024*1024*1024") + npu_device.open().as_default() + +npu_config() + +class LossHistory(tf.keras.callbacks.Callback): + def __init__(self, bs): + super().__init__() + self.batch_size = bs + def on_batch_begin(self, batch, logs={}): + self.start = time.time() + def on_batch_end(self, batch, logs={}): + if batch % args.log_steps == 0: + loss = logs.get('loss') + dura = time.time() - self.start + if dura < 10: + self.epoch_perf.append(dura) + print('step:%d ,loss: %f ,time:%f'%(batch, loss, dura), flush=True) + def on_epoch_begin(self, epoch, logs={}): + self.epoch_perf = [] + self.epochstart = time.time() + def on_epoch_end(self, epoch, logs={}): + duration = time.time() - self.epochstart + print('epoch_duration: ', duration) + if epoch != 0: + self.perf.append(np.mean(self.epoch_perf)) + def on_train_begin(self, logs={}): + print('params: ', self.params) + self.perf = [] + def on_train_end(self, logs={}): + print('imgs/s: %.2f'%(self.batch_size / np.mean(self.perf))) + +if __name__ == '__main__': + # =============================== GPU ============================== + # gpu = tf.config.experimental.list_physical_devices(device_type='GPU') + # print(gpu) + # If you have GPU, and the value is GPU serial number. 
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '4' + # ========================= Hyper Parameters ======================= + # you can modify your file path + file = 'train.txt' + file = os.path.join(args.data_path, file) + print(file) + read_part = True + sample_num = args.sample_num + test_size = 0.2 + + k = 10 + + learning_rate = 0.001 + batch_size = args.batch_size + epochs = args.epochs + # ========================== Create dataset ======================= + feature_columns, train, test = create_criteo_dataset(file=file, + read_part=read_part, + sample_num=sample_num, + test_size=test_size, + static=args.static) + train_X, train_y = train + test_X, test_y = test + # ============================Build Model========================== + model = FFM(feature_columns=feature_columns, k=k) + model.summary() + logger = LossHistory(batch_size) + # ============================model checkpoint====================== + # check_path = '../save/fm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt' + # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True, + # verbose=1, period=5) + # ============================Compile============================ + model.compile(loss=binary_crossentropy, optimizer=Adam(learning_rate=learning_rate), + metrics=[AUC()]) + # ==============================Fit============================== + model.fit( + train_X, + train_y, + epochs=epochs, + # callbacks=[EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)], # checkpoint + callbacks=logger, + batch_size=batch_size, + validation_split=0.1, + verbose=2 + ) + save_ckpt = os.path.join(args.ckpt_save_path, "checkpoint/tf_model") + model.save_weights(filepath=save_ckpt, save_format="tf") + # ===========================Test============================== + print('test AUC: %f' % model.evaluate(test_X, test_y, batch_size=batch_size)[1]) diff --git a/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/utils.py new file mode 100644 index 000000000..42da51609 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FFM_ID2632_for_TensorFlow2.X/utils.py @@ -0,0 +1,58 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +""" +Created on July 13, 2020 +Updated on May 18, 2021 + +input feature columns: sparseFeature, denseFeature + +@author: Ziyao Geng(zggzy1996@163.com) +""" + + +def sparseFeature(feat, feat_num, embed_dim=4): + """ + create dictionary for sparse feature + :param feat: feature name + :param feat_num: the total number of sparse features that do not repeat + :param embed_dim: embedding dimension + :return: + """ + return {'feat_name': feat, 'feat_num': feat_num, 'embed_dim': embed_dim} + + +def denseFeature(feat): + """ + create dictionary for dense feature + :param feat: dense feature name + :return: + """ + return {'feat_name': feat} \ No newline at end of file -- Gitee From 8dfa33843000e09ef426337dd4c49d8c464db91a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:17:12 +0000 Subject: [PATCH 38/54] =?UTF-8?q?FM=5FID2631=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../FM_ID2631_for_TensorFlow2.X/LICENSE | 284 ++++++++++++++++++ .../FM_ID2631_for_TensorFlow2.X/README.md | 221 ++++++++++++++ .../FM_ID2631_for_TensorFlow2.X/README_BAK.md | 70 +++++ .../FM_ID2631_for_TensorFlow2.X/criteo.py | 111 +++++++ .../FM_ID2631_for_TensorFlow2.X/model.py | 113 +++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 5 + .../FM_ID2631_for_TensorFlow2.X/run_1p.sh | 1 + .../test/train_full_1p.sh | 206 +++++++++++++ .../test/train_performance_1p.sh | 205 +++++++++++++ .../test/train_performance_1p_static.sh | 206 +++++++++++++ .../FM_ID2631_for_TensorFlow2.X/train.py | 185 ++++++++++++ .../FM_ID2631_for_TensorFlow2.X/utils.py | 57 ++++ 13 files changed, 1667 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/criteo.py create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/model.py create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/train.py create mode 100644 TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/utils.py diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..b6056e48b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README.md @@ -0,0 +1,221 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +

+<h2 id="基本信息.md">基本信息</h2>
+ +**发布者(Publisher):Huawei** + +**应用领域(Application Domain):Recommendation** + +**版本(Version):1.1** + +**修改时间(Modified) :2022.04.21** + +**大小(Size):16M** + +**框架(Framework):TensorFlow_2.6.2** + +**模型格式(Model Format):ckpt** + +**精度(Precision):Mixed** + +**处理器(Processor):昇腾910** + +**应用级别(Categories):Official** + +**描述(Description):基于TensorFlow2.X框架的推荐算法CTR预估模型的训练代码** + + +

+<h2 id="概述.md">概述</h2>
+
+
+## 简述
+
+    因子分解机(Factorization Machine,FM)是由Steffen Rendle提出的一种基于矩阵分解的机器学习算法,目前被广泛应用于广告点击率(CTR)预估中。相比经典的Logistic Regression(LR),FM能够刻画特征之间的二阶组合(交叉)关系,因此拥有更强的表达能力,预估效果也明显更好。
+
+
+  - 参考论文:
+    https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5694074
+
+  - 参考实现:
+    https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/FM
+
+
+  - 适配昇腾 AI 处理器的实现:
+    skip
+
+  - 通过Git获取对应commit_id的代码方法如下:
+    ```
+    git clone {repository_url}        # 克隆仓库的代码
+    cd {repository_name}              # 切换到模型的代码仓目录
+    git checkout {branch}             # 切换到对应分支
+    git reset --hard {commit_id}      # 代码设置到对应的commit_id
+    cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+    ```
+
+
+
+
+## 默认配置
+
+
+- 网络结构
+  - 结合了SVM与因子分解(factorization)模型的优点,能够在非常稀疏的数据中进行可靠的参数估计。
+  - 建模多维特征之间的交叉关系,交叉项参数通过矩阵分解的方式训练。
+  - 在FM中,每条评分记录对应特征矩阵x的一行:前u列为User矩阵,每个User对应一列;接下来的i列为Item特征矩阵;其后若干列为其他隐式特征;再后一列表示时间;最后i列表示同一用户上一条记录的评分,用于刻画用户的历史行为。
+
+- 训练超参(单卡):
+  - file:Criteo文件;
+  - read_part:是否读取部分数据,True(full脚本为False);
+  - sample_num:读取部分时,样本数量,1000000;
+  - test_size:测试集比例,0.2;
+  - k:隐因子,8;
+  - dnn_dropout:Dropout, 0.5;
+  - hidden_unit:DNN的隐藏单元,[256, 128, 64];
+  - learning_rate:学习率,0.001;
+  - batch_size:4096;
+  - epoch:10;
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+|-------|------|
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+    config_proto = tf.ConfigProto(allow_soft_placement=True)
+    custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+    custom_op.name = 'NpuOptimizer'
+    custom_op.parameter_map["use_off_line"].b = True
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+    config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+    session_config = npu_config_proto(config_proto=config_proto)
+```
+
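+说明:上述示例为TF1.X会话配置写法;本仓的TF2.X训练脚本实际通过npu_device全局选项开启混合精度(与本补丁中各train.py的npu_config()用法一致),最小示意如下:
+
+```python
+import npu_device
+
+# 开启自动混合精度(与train.py中npu_config()的设置一致)
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# 可选:配合--use_mixlist指定混合精度黑白名单文件(ops_info.json为脚本默认值)
+# npu_device.global_options().modify_mixlist = "ops_info.json"
+npu_device.open().as_default()
+```
+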

+<h2 id="训练环境准备.md">训练环境准备</h2>
+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt位于模型的根目录。
+
+

+<h2 id="快速上手.md">快速上手</h2>
+ +## 数据集准备 + + 采用Criteo数据集进行测试。数据集的处理见../data_process文件,主要分为: +1. 考虑到Criteo文件过大,因此可以通过read_part和sample_sum读取部分数据进行测试; +2. 对缺失数据进行填充; +3. 对密集数据I1-I13进行离散化分桶(bins=100),对稀疏数据C1-C26进行重新编码LabelEncoder; +4. 整理得到feature_columns; +5. 切分数据集,最后返回feature_columns, (train_X, train_y), (test_X, test_y); + + +## 模型训练 +- 单击“立即下载”,并选择合适的下载方式下载源码包。 +- 开始训练。 + + 1. 启动训练之前,首先要配置程序运行相关环境变量。 + + 环境变量配置信息参见: + + [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819) + + + 2. 单卡训练 + + 2.1 设置单卡训练参数(脚本位于FM_ID2631_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。 + + + ``` + batch_size=4096 + #训练step + train_epochs=10 + #训练epoch + ``` + + 2.2 单卡训练指令(FM_ID2631_for_TensorFlow2.X/test) + + ``` + 于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡 + bash train_full_1p.sh --data_path=xx + 数据集应为txt类型,配置data_path时需指定为Criteo这一层,例:--data_path=/home/data/Criteo + ├─data + ├──Criteo + │ ├──demo.txt + │ ├──.DS_Store + │ ├──train.txt + + ``` + +
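+下面给出训练入口调用数据集处理函数的最小示意(函数签名见本仓criteo.py;文件路径为示例值,需替换为实际路径):
+
+```python
+from criteo import create_criteo_dataset
+
+# 示例:读取部分Criteo数据并切分训练/测试集
+feature_columns, (train_X, train_y), (test_X, test_y) = create_criteo_dataset(
+    file='/home/data/Criteo/train.txt',  # 示例路径
+    read_part=True,                      # 是否只读取部分数据
+    sample_num=1000000,                  # read_part=True时读取的样本数
+    test_size=0.2)                       # 测试集比例
+```
+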

+<h2 id="迁移学习指导.md">迁移学习指导</h2>
+ +- 数据集准备。 + + 1. 获取数据。 + 请参见“快速上手”中的数据集准备 + +- 模型训练 + + 请参考“快速上手”章节 + +

+<h2 id="高级参考.md">高级参考</h2>
+
+
+## 脚本和示例代码
+
+
+    |--modelzoo_level.txt        #状态文件
+    |--LICENSE
+    |--README.md                 #说明文档
+    |--criteo.py                 #数据集处理代码
+    |--model.py                  #模型结构代码
+    |--train.py                  #训练代码
+    |--requirements.txt          #所需依赖
+    |--run_1p.sh
+    |--utils.py
+    |--test                      #训练脚本目录
+    |    |--train_full_1p.sh                 #全量训练脚本
+    |    |--train_performance_1p.sh          #performance训练脚本
+    |    |--train_performance_1p_static.sh   #静态shape的performance训练脚本
+
+
+## 脚本参数
+
+```
+batch_size               训练batch_size
+epochs                   训练epoch数
+precision_mode           default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision'
+over_dump                type=ast.literal_eval, help='whether to enable overflow detection, default is False'
+data_dump_flag           type=ast.literal_eval, help='data dump flag, default is False'
+data_dump_step           data dump step, default is 10
+profiling                type=ast.literal_eval, help='whether to enable profiling for performance debugging, default is False'
+profiling_dump_path      type=str, help='the path to save profiling data'
+over_dump_path           type=str, help='the path to save over dump data'
+data_dump_path           type=str, help='the path to save dump data'
+use_mixlist              type=ast.literal_eval, help='use_mixlist flag, default is False'
+fusion_off_flag          type=ast.literal_eval, help='fusion_off flag, default is False'
+mixlist_file             type=str, help='mixlist file name, default is ops_info.json'
+fusion_off_file          type=str, help='fusion_off file name, default is fusion_switch.cfg'
+auto_tune                help='auto_tune flag, default is False'
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径,具体流程参见“模型训练”的示例。
diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..18d7e7a75
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,70 @@
+## FM
+
+### 1. 论文
+Factorization Machines
+
+**创新**:**经典因子分解机模型**
+
+
+
+### 2. 模型结构
+
+
+(此处原为FM模型结构示意图)
+
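+作为参考,下面补充该图所示的FM预测公式(摘自参考论文Factorization Machines,非原文内容;其一阶、二阶两项与本仓model.py中first_order/second_order的实现一致):
+
+```math
+\hat{y}(x) = w_0 + \sum_{i=1}^{n} w_i x_i + \sum_{i=1}^{n}\sum_{j=i+1}^{n} \langle \mathbf{v}_i, \mathbf{v}_j \rangle\, x_i x_j
+```
+
+二阶交叉项可按如下恒等式化简为O(kn)复杂度(即model.py中square_sum与sum_square之差的一半):
+
+```math
+\sum_{i=1}^{n}\sum_{j=i+1}^{n} \langle \mathbf{v}_i, \mathbf{v}_j \rangle\, x_i x_j
+= \frac{1}{2}\sum_{f=1}^{k}\left[\Big(\sum_{i=1}^{n} v_{i,f}\, x_i\Big)^{2} - \sum_{i=1}^{n} v_{i,f}^{2}\, x_i^{2}\right]
+```
+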
+ + + +### 3. 实验数据集 + +采用Criteo数据集进行测试。数据集的处理见`../data_process`文件,主要分为: + +1. 考虑到Criteo文件过大,因此可以通过`read_part`和`sample_sum`读取部分数据进行测试; +2. 对缺失数据进行填充; +3. 对密集数据`I1-I13`进行离散化分桶(bins=100),对稀疏数据`C1-C26`进行重新编码`LabelEncoder`; +4. 整理得到`feature_columns`; +5. 切分数据集,最后返回`feature_columns, (train_X, train_y), (test_X, test_y)`; + + + +### 4. 模型API + +```python +class FM_Layer(Layer): + def __init__(self, feature_columns, k, w_reg=1e-6, v_reg=1e-6): + """ + Factorization Machines + :param feature_columns: A list. sparse column feature information. + :param k: the latent vector + :param w_reg: the regularization coefficient of parameter w + :param v_reg: the regularization coefficient of parameter v + """ +``` + + + +### 5. 实验超参数 + +- file:Criteo文件; +- read_part:是否读取部分数据,`True`; +- sample_num:读取部分时,样本数量,`1000000`; +- test_size:测试集比例,`0.2`; +- +- k:隐因子,`8`; +- dnn_dropout:Dropout, `0.5`; +- hidden_unit:DNN的隐藏单元,`[256, 128, 64]`; +- +- learning_rate:学习率,`0.001`; +- batch_size:`4096`; +- epoch:`10`; + + + +### 6. 实验结果 + +1. 采用Criteo数据集中前`500w`条数据,最终测试集的结果为:`AUC: 0.778358, loss: 0.4765`; +2. 采用Criteo数据集全部内容: + - 学习参数:264,501,784; + - 单个Epoch运行时间【GPU:Tesla V100S-PCI】:323s; + - 测试集结果:`AUC: 0.787504, loss: 0.4762`; + diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/criteo.py b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/criteo.py new file mode 100644 index 000000000..fce4ae988 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/criteo.py @@ -0,0 +1,111 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on July 13, 2020 + +dataset:criteo dataset sample +features: +- Label - Target variable that indicates if an ad was clicked (1) or not (0). +- I1-I13 - A total of 13 columns of integer features (mostly count features). +- C1-C26 - A total of 26 columns of categorical features. +The values of these features have been hashed onto 32 bits for anonymization purposes. 
+ +@author: Ziyao Geng(zggzy1996@163.com) +""" + +import pandas as pd +import numpy as np + +from sklearn.preprocessing import LabelEncoder, KBinsDiscretizer +from sklearn.model_selection import train_test_split + +from utils import sparseFeature + + +def create_criteo_dataset(file, embed_dim=8, read_part=True, sample_num=100000, test_size=0.2,static=0): + """ + a example about creating criteo dataset + :param file: dataset's path + :param embed_dim: the embedding dimension of sparse features + :param read_part: whether to read part of it + :param sample_num: the number of instances if read_part is True + :param test_size: ratio of test dataset + :return: feature columns, train, test + """ + names = ['label', 'I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', + 'I12', 'I13', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', + 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', + 'C23', 'C24', 'C25', 'C26'] + + if read_part: + data_df = pd.read_csv(file, sep='\t', iterator=True, header=None, + names=names) + data_df = data_df.get_chunk(sample_num) + + else: + data_df = pd.read_csv(file, sep='\t', header=None, names=names) + + sparse_features = ['C' + str(i) for i in range(1, 27)] + dense_features = ['I' + str(i) for i in range(1, 14)] + features = sparse_features + dense_features + + data_df[sparse_features] = data_df[sparse_features].fillna('-1') + data_df[dense_features] = data_df[dense_features].fillna(0) + + # Bin continuous data into intervals. + est = KBinsDiscretizer(n_bins=100, encode='ordinal', strategy='uniform') + data_df[dense_features] = est.fit_transform(data_df[dense_features]) + + for feat in sparse_features: + le = LabelEncoder() + data_df[feat] = le.fit_transform(data_df[feat]) + + # ==============Feature Engineering=================== + + # ==================================================== + feature_columns = [sparseFeature(feat, int(data_df[feat].max()) + 1, embed_dim=embed_dim) + for feat in features] + train, test = train_test_split(data_df, test_size=test_size) + if static==1: + train_X = train[features].values[:3686400].astype('int32') + train_y = train['label'].values[:3686400].astype('int32') + print("train_X.shape",train_X.shape,flush=True) + print("train_y.shape",train_y.shape,flush=True) + test_X = test[features].values[:999424].astype('int32') + test_y = test['label'].values[:999424].astype('int32') + print("test_X.shape",test_X.shape,flush=True) + print("test_y.shape",test_y.shape,flush=True) + else: + train_X = train[features].values.astype('int32') + train_y = train['label'].values.astype('int32') + test_X = test[features].values.astype('int32') + test_y = test['label'].values.astype('int32') + + return feature_columns, (train_X, train_y), (test_X, test_y) \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/model.py b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/model.py new file mode 100644 index 000000000..b977ec0b8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/model.py @@ -0,0 +1,113 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on August 25, 2020 +Updated on May, 18, 2021 + +model: Factorization Machines + +@author: Ziyao Geng(zggzy1996@163.com) +""" + +import tensorflow as tf +from tensorflow.keras import Model +from tensorflow.keras.layers import Layer, Input +from tensorflow.keras.regularizers import l2 + + +class FM_Layer(Layer): + def __init__(self, feature_columns, k, w_reg=1e-6, v_reg=1e-6): + """ + Factorization Machines + :param feature_columns: A list. sparse column feature information. + :param k: the latent vector + :param w_reg: the regularization coefficient of parameter w + :param v_reg: the regularization coefficient of parameter v + """ + super(FM_Layer, self).__init__() + self.sparse_feature_columns = feature_columns + self.index_mapping = [] + self.feature_length = 0 + for feat in self.sparse_feature_columns: + self.index_mapping.append(self.feature_length) + self.feature_length += feat['feat_num'] + self.k = k + self.w_reg = w_reg + self.v_reg = v_reg + + def build(self, input_shape): + self.w0 = self.add_weight(name='w0', shape=(1,), + initializer=tf.zeros_initializer(), + trainable=True) + self.w = self.add_weight(name='w', shape=(self.feature_length, 1), + initializer=tf.random_normal_initializer(), + regularizer=l2(self.w_reg), + trainable=True) + self.V = self.add_weight(name='V', shape=(self.feature_length, self.k), + initializer=tf.random_normal_initializer(), + regularizer=l2(self.v_reg), + trainable=True) + + def call(self, inputs, **kwargs): + # mapping + inputs = inputs + tf.convert_to_tensor(self.index_mapping) + # first order + first_order = self.w0 + tf.reduce_sum(tf.nn.embedding_lookup(self.w, inputs), axis=1) # (batch_size, 1) + # second order + second_inputs = tf.nn.embedding_lookup(self.V, inputs) # (batch_size, fields, embed_dim) + square_sum = tf.square(tf.reduce_sum(second_inputs, axis=1, keepdims=True)) # (batch_size, 1, embed_dim) + sum_square = tf.reduce_sum(tf.square(second_inputs), axis=1, keepdims=True) # (batch_size, 1, embed_dim) + second_order = 0.5 * tf.reduce_sum(square_sum - sum_square, axis=2) # (batch_size, 1) + # outputs + outputs = first_order + second_order + return outputs + + +class FM(Model): + def __init__(self, feature_columns, k, w_reg=1e-6, v_reg=1e-6): + """ + Factorization Machines + :param feature_columns: A list. sparse column feature information. 
+ :param k: the latent vector + :param w_reg: the regularization coefficient of parameter w + :param v_reg: the regularization coefficient of parameter v + """ + super(FM, self).__init__() + self.sparse_feature_columns = feature_columns + self.fm = FM_Layer(feature_columns, k, w_reg, v_reg) + + def call(self, inputs, **kwargs): + fm_outputs = self.fm(inputs) + outputs = tf.nn.sigmoid(fm_outputs) + return outputs + + def summary(self, **kwargs): + sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32) + Model(inputs=sparse_inputs, outputs=self.call(sparse_inputs)).summary() \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..31529da2e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:OK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..eab9bac08 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/requirements.txt @@ -0,0 +1,5 @@ +os +numpy +pandas +sklearn +tensorflow \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/run_1p.sh new file mode 100644 index 000000000..ef6943627 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/run_1p.sh @@ -0,0 +1 @@ +python3 train.py \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..17c66c2b8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,206 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=4096 +#网络名称,同目录名称 +Network="FM_ID2631_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +#训练epoch,可选 +#train_epochs=5 +train_epochs=10 +#训练step +# train_steps=5 +#学习率 +learning_rate=0.001 +ckpt_path="" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#precision_mode="allow_fp32_to_fp16" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/output/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + 
data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +#参数修改 +#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --lr=${learning_rate}\ + --read_part=False\ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path}> $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#参数回改 +#sed -i "s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +#TrainingTime=${e2e_time} +time=`grep loss: $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print$3}' | tr -d 's'|tail -1` +trainsteps=`grep loss: $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F / '{print$1}' | awk 'END{print$1}'|tail -1` +TrainingTime=`awk 'BEGIN{printf "%.4f\n",'${time}'/'${trainsteps}'}'` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep AUC|awk '{print $3}'` +train_accuracy=`grep loss: $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F 'val_auc:' '{print$2}' | tr -d ' '|tail -1` +#打印,不需要修改 +echo "Final 
Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk -F "loss:" '{print $2}'|awk -F "- auc:" '{print $1}' | tr -d ' ' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +#ModelStatus="图执行FAIL" +#DTS_Number="DTS2022011211791" +#error_msg="not support,op" +#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..9bc649c8a --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,205 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=4096 +#网络名称,同目录名称 +Network="FM_ID2631_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +#训练epoch,可选 +#train_epochs=5 +train_epochs=2 +#训练step +# train_steps=5 +#学习率 +learning_rate=0.001 +ckpt_path="" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/output/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False 
+data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +#参数修改 +#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --lr=${learning_rate}\ + --read_part=True\ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path}> $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#参数回改 +#sed -i "s|${datth}/th}//io//tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_C'/|g" ${cur_paath}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +#TrainingTime=${e2e_time} +time=`grep loss: 
$cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk '{print$3}' | tr -d 's'|tail -1` +trainsteps=`grep loss: $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F / '{print$1}' | awk 'END{print$1}'|tail -1` +TrainingTime=`awk 'BEGIN{printf "%.4f\n",'${time}'/'${trainsteps}'}'` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep Accuracy|awk '{print $3}'` +train_accuracy=`grep loss: $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F 'val_auc:' '{print$2}' | tr -d ' '|tail -1` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk -F "loss:" '{print $2}'|awk -F "- auc:" '{print $1}' | tr -d ' ' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +#ModelStatus="图执行FAIL" +#DTS_Number="DTS2022011211791" +#error_msg="not support,op" +#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -l` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p_static.sh new 
file mode 100644 index 000000000..2d251e025 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,206 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=4096 +#网络名称,同目录名称 +Network="FM_ID2631_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +#训练epoch,可选 +#train_epochs=5 +train_epochs=5 +#训练step +# train_steps=5 +#学习率 +learning_rate=0.001 +ckpt_path="" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/output/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +#参数修改 +#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py +#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py + + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +python3 train.py --data_dir=${data_path}\ + --epochs=${train_epochs}\ + --lr=${learning_rate}\ + --read_part=True\ + --batch_size=${batch_size} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + 
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --static=1 > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+#参数回改
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+#输出性能FPS,需要模型审视修改
+#TrainingTime=${e2e_time}
+Step=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'`
+Time=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'`
+TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+#打印,不需要修改
+echo "Final Performance images/sec : $FPS"
+
+#输出训练精度,需要模型审视修改
+#train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep Accuracy|awk '{print $3}'`
+train_accuracy=`grep loss: $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk -F 'val_auc:' '{print$2}' | tr -d ' '|tail -1`
+#打印,不需要修改
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#精度看护结果汇总
+#训练用例信息,不需要修改
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##获取性能数据,不需要修改
+#吞吐量
+ActualFPS=${FPS}
+#单迭代训练时长
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'`
+
+#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视
+grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk -F "loss:" '{print $2}'|awk -F "- auc:" '{print $1}' | tr -d ' ' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+#最后一个迭代loss值,不需要修改
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+#ModelStatus="图执行FAIL"
+#DTS_Number="DTS2022011211791"
+#error_msg="not support,op"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+#error_msg=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tail -1`
+
+#关键信息打印到${CaseName}.log中,不需要修改
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >>
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/train.py new file mode 100644 index 000000000..ac01157dc --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/train.py @@ -0,0 +1,185 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +""" +Created on August 25, 2020 + +train FM model + +@author: Ziyao Geng(zggzy1996@163.com) +""" +import npu_device +print('npu_device loaded') +import tensorflow as tf +from tensorflow.keras.losses import binary_crossentropy +from tensorflow.keras.callbacks import EarlyStopping +from tensorflow.keras.optimizers import Adam +from tensorflow.keras.metrics import AUC + +from model import FM +from criteo import create_criteo_dataset + +import argparse +import ast +import os + +# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_dir', default="../Criteo", + help="""directory to data""") + parser.add_argument('--batch_size', default=4096, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=10, type=int, + help="""epochs""") + parser.add_argument('--lr', default=0.001, type=float, + help="""learning rate""") + parser.add_argument('--read_part', dest="read_part", type=ast.literal_eval, + help='read part or all of the data') + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, + help='auto_tune flag, default is False') + parser.add_argument('--static', default=0, type=int, + help="""static""") + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + 
npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.global_options().variable_memory_max_size=10*1024*1024*1024 + npu_device.global_options().graph_memory_max_size=str("21*1024*1024*1024") + npu_device.open().as_default() +#===============================NPU Migration========================================= + + +if __name__ == '__main__': + # =============================== GPU ============================== + # gpu = tf.config.experimental.list_physical_devices(device_type='GPU') + # print(gpu) + # If you have GPU, and the value is GPU serial number. + # os.environ['CUDA_VISIBLE_DEVICES'] = '4' + # ========================= Hyper Parameters ======================= + # you can modify your file path + # file = '../dataset/Criteo/train.txt' + + args = parse_args() + npu_config() + file = os.path.join(args.data_dir, 'train.txt') + + read_part = args.read_part + sample_num = 5000000 + test_size = 0.2 + + k = 8 + + learning_rate = args.lr + batch_size = args.batch_size + epochs = args.epochs + # ========================== Create dataset ======================= + feature_columns, train, test = create_criteo_dataset(file=file, + read_part=read_part, + sample_num=sample_num, + test_size=test_size, + static=args.static) + train_X, train_y = train + test_X, test_y = test + # ============================Build Model========================== + one_device_strategy = tf.distribute.OneDeviceStrategy("device:CPU:0") + with one_device_strategy.scope(): + model = FM(feature_columns=feature_columns, k=k) + # model.summary() + # ============================Compile============================ + model.compile(loss=binary_crossentropy, optimizer=Adam(learning_rate=learning_rate), + metrics=[AUC()]) + # ============================model checkpoint====================== + # check_path = '../save/fm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt' + # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True, + # verbose=1, period=5) + # ==============================Fit============================== + model.fit( + train_X, + train_y, + epochs=epochs, + #callbacks=[EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)], # checkpoint + batch_size=batch_size, + validation_split=0.1, + verbose=2, + ) + tf.saved_model.save(model, "model_saved_model") + # ===========================Test============================== + #print('test AUC: %f' % model.evaluate(test_X, test_y, batch_size=batch_size)[1]) diff --git a/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/utils.py new file mode 100644 index 000000000..947b4b95c --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/FM_ID2631_for_TensorFlow2.X/utils.py @@ -0,0 
+1,57 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on July 13, 2020 +Updated on May 18, 2021 + +input feature columns: sparseFeature, denseFeature + +@author: Ziyao Geng(zggzy1996@163.com) +""" + + +def sparseFeature(feat, feat_num, embed_dim=4): + """ + create dictionary for sparse feature + :param feat: feature name + :param feat_num: the total number of sparse features that do not repeat + :param embed_dim: embedding dimension + :return: + """ + return {'feat_name': feat, 'feat_num': feat_num, 'embed_dim': embed_dim} + + +def denseFeature(feat): + """ + create dictionary for dense feature + :param feat: dense feature name + :return: + """ + return {'feat_name': feat} \ No newline at end of file -- Gitee From 19a5a162acc5ff09f5a8daa62c821e21874fe9a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:17:34 +0000 Subject: [PATCH 39/54] =?UTF-8?q?involution=5FID2515=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 284 + .../README.md | 264 + .../configs/ops_info.json | 7 + .../involution.py | 436 ++ .../modelzoo_level.txt | 3 + .../my_adam.py | 268 + .../my_backend.py | 5951 +++++++++++++++++ .../my_losses.py | 1197 ++++ .../requirements.txt | 0 .../test/train_full_1p_static.sh | 171 + .../test/train_performance_1p.sh | 136 + .../test/train_performance_1p_inv.sh | 160 + .../test/train_performance_1p_static.sh | 171 + 13 files changed, 9048 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/configs/ops_info.json create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/involution.py create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_adam.py create mode 100644 
TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_backend.py create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_losses.py create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_full_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_inv.sh create mode 100644 TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_static.sh diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..dff3c9650 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/README.md @@ -0,0 +1,264 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +
+<h2 id="基本信息.md">基本信息</h2>
+ +**发布者(Publisher):Huawei** + +**应用领域(Application Domain):Image Classification** + +**版本(Version):1.1** + +**修改时间(Modified) :2022.4.8** + +**大小(Size):210KB** + +**框架(Framework):TensorFlow_2.6** + +**模型格式(Model Format):ckpt** + +**精度(Precision):Mixed** + +**处理器(Processor):昇腾910** + +**应用级别(Categories):Official** + +**描述(Description):基于TensorFlow框架的Involutional网络训练代码** + +
+<h2 id="概述.md">概述</h2>
+
+- Involutional neural networks 由 Involution 算子构成。Involution 将卷积的固有特性反转(Inverting the Inherence of Convolution):其 kernel 在空间位置上特定(position-specific)、在通道间共享(channel-agnostic),恰与卷积相反。其输出的计算公式见本节末尾的补充说明。
+
+- 参考论文:
+
+    https://arxiv.org/abs/2103.06255
+
+- 参考实现:
+
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/involution.py
+
+- 适配昇腾 AI 处理器的实现:
+
+    skip
+
+- 通过Git获取对应commit_id的代码方法如下:
+
+        git clone {repository_url}        # 克隆仓库的代码
+        cd {repository_name}              # 切换到模型的代码仓目录
+        git checkout {branch}             # 切换到对应分支
+        git reset --hard {commit_id}      # 代码设置到对应的commit_id
+        cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+
+
+## 默认配置
+- 网络结构
+
+        Model: "inv_model"
+        _________________________________________________________________
+        Layer (type)                 Output Shape              Param #
+        =================================================================
+        input_1 (InputLayer)         [(None, 32, 32, 3)]       0
+        _________________________________________________________________
+        inv_1 (Involution)           ((None, 32, 32, 3), (None 26
+        _________________________________________________________________
+        re_lu_3 (ReLU)               (None, 32, 32, 3)         0
+        _________________________________________________________________
+        max_pooling2d_2 (MaxPooling2 (None, 16, 16, 3)         0
+        _________________________________________________________________
+        inv_2 (Involution)           ((None, 16, 16, 3), (None 26
+        _________________________________________________________________
+        re_lu_4 (ReLU)               (None, 16, 16, 3)         0
+        _________________________________________________________________
+        max_pooling2d_3 (MaxPooling2 (None, 8, 8, 3)           0
+        _________________________________________________________________
+        inv_3 (Involution)           ((None, 8, 8, 3), (None,  26
+        _________________________________________________________________
+        re_lu_5 (ReLU)               (None, 8, 8, 3)           0
+        _________________________________________________________________
+        flatten_1 (Flatten)          (None, 192)               0
+        _________________________________________________________________
+        dense_2 (Dense)              (None, 64)                12352
+        _________________________________________________________________
+        dense_3 (Dense)              (None, 10)                650
+        =================================================================
+        Total params: 13,080
+        Trainable params: 13,074
+        Non-trainable params: 6
+        _________________________________________________________________
+
+- 训练超参(单卡):
+    - Batch size: 256
+    - Train epoch: 200
+
+
+## 支持特性
+
+| 特性列表   | 是否支持 |
+| ---------- | -------- |
+| 分布式训练 | 否       |
+| 混合精度   | 是       |
+| 数据并行   | 否       |
+
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+拉起脚本中,默认开启混合精度传入,即precision_mode='allow_mix_precision'
+
+```
+ ./train_performance_1p.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    --batch_size             batch size
+    --epochs                 # of epochs for training
+    -h/--help                show help message
+```
+
+相关代码示例:
+
+```
+npu_device.global_options().precision_mode = args.precision_mode
+```
+
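+补充说明(编辑补充,非原仓库内容):依据参考论文 https://arxiv.org/abs/2103.06255 中的定义,involution 的输出可以写成
+
+$$\mathbf{Y}_{i,j,k}=\sum_{(u,v)\in\Delta_K}\mathcal{H}_{i,j,\,u+\lfloor K/2\rfloor,\,v+\lfloor K/2\rfloor,\,\lceil kG/C\rceil}\;\mathbf{X}_{i+u,\,j+v,\,k}$$
+
+其中 X 为输入特征图,H 为由 X 动态生成的 involution kernel,K 为 kernel 大小,G 为组数,C 为通道数。该公式与 involution.py 中 call() 的实现一致:先用 extract_patches 提取邻域,再与生成的 kernel 逐元素相乘,最后沿 K*K 维度求和。
+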
+<h2 id="训练环境准备.md">训练环境准备</h2>
+
+1. 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)》
+2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
+
+    当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
+
+    **表 1** 镜像列表
+
+| 镜像名称 | 镜像版本 | 配套CANN版本 |
+| --------------------- | ------- | ------------ |
+| ascend-tensorflow-arm | 21.09   | 20.2         |
+
+
+3. 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt位于模型的根目录。
+
+<h2 id="快速上手.md">快速上手</h2>
+
+## 数据集准备
+
+1、用户自行准备好数据集,本模型使用的数据集是CIFAR-10(python版本,解压后为cifar-10-batches-py目录)
+
+2、训练脚本通过--data_path参数指定cifar-10-batches-py所在路径
+
+数据集目录参考如下:
+
+```
+
+├─data
+│   └─cifar-10-batches-py
+│        ├──batches.meta
+│        ├──data_batch_1
+│        ├──data_batch_2
+│        ├──data_batch_3
+│        ├──data_batch_4
+│        ├──data_batch_5
+│        ├──readme.html
+│        └─test_batch
+```
+
+
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+    1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+       环境变量配置信息参见:
+
+          [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. 单卡训练
+
+       2.1 单卡训练指令(脚本位于involution_ID2515_for_TensorFlow2.X/test/train_full_1p_static.sh),请确保下面例子中的“--data_path”修改为用户的data的路径,这里选择将data文件夹放在home目录下。训练默认开启混合精度,即precision_mode='allow_mix_precision'。另一个带参数的调用示例见下文。
+
+        bash train_full_1p_static.sh --data_path=/home/data
+
+
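+作为补充(编辑补充,非原仓库内容):性能脚本的一个可能调用示例如下,参数名取自上文的帮助信息,路径为示意值,请按实际环境修改:
+
+```
+bash test/train_performance_1p.sh --data_path=/home/data \
+    --precision_mode=allow_mix_precision \
+    --over_dump=False \
+    --profiling=False
+```
+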
+<h2 id="高级参考.md">高级参考</h2>
+
+## 脚本和示例代码
+
+```
+|--configs                            #网络配置
+|  |--ops_info.json
+|--test                               #训练脚本目录
+|  |--train_full_1p_static.sh         # 全量静态训练
+|  |--train_performance_1p.sh
+|  |--train_performance_1p_inv.sh
+|  |--train_performance_1p_static.sh
+|--involution.py                      #网络脚本
+|--......
+```
+
+## 脚本参数
+
+```
+    parser.add_argument('--data_path', default="../cifar-10-batches-py/", help="""directory to data""")
+    parser.add_argument('--batch_size', default=128, type=int, help="""batch size for 1p""")
+    parser.add_argument('--epochs', default=10, type=int, help="""epochs""")
+    parser.add_argument('--Drop_Reminder', dest="Drop_Reminder", type=ast.literal_eval, help='whether to drop the remainder batch (static shape) or not')
+    parser.add_argument('--save_h5', dest="save_h5", type=ast.literal_eval, help='whether save h5 file after training')
+    parser.add_argument('--network', default="convolution", help='train network, only "convolution" or "involution"')
+    #===============================NPU Migration=========================================
+    parser.add_argument("--log_steps", default=50, type=int, help="TimeHis log Step.")
+    parser.add_argument('--precision_mode', default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision')
+    parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False')
+    parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False')
+    parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10')
+    parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, help='if or not profiling for performance debug, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+    parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+    parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+    parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False')
+    parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False')
+    parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+    parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+    parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, help='auto_tune flag, default is False')
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练(本模型当前仅支持单卡训练)。模型存储路径为${cur_path}/output/$ASCEND_DEVICE_ID,包括训练的log以及checkpoints文件。loss信息在文件${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log中。
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/configs/ops_info.json b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/configs/ops_info.json
new file mode 100644
index 000000000..e211c6b46
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/configs/ops_info.json
@@ -0,0 +1,7 @@
+{
+    "black-list": {
+        "to-add": [
+            "DivNoNan"
+        ]
+    }
+}
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/involution.py b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/involution.py
new file mode 100644
index
000000000..61d1537c7 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/involution.py @@ -0,0 +1,436 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os + +#os.environ["CUDA_VISIBLE_DEVICES"] = "7" +import npu_device +import time +import tensorflow as tf +import numpy as np +from tensorflow import keras +import argparse +import ast +# import matplotlib.pyplot as plt +from tensorflow.python.keras import backend as K +from tensorflow.python.keras.datasets.cifar import load_batch + +# Set seed for reproducibility. 
+tf.random.set_seed(42)
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--data_path', default="../cifar-10-batches-py/",
+                        help="""directory to data""")
+    parser.add_argument('--batch_size', default=128, type=int,
+                        help="""batch size for 1p""")
+    parser.add_argument('--epochs', default=10, type=int,
+                        help="""epochs""")
+    parser.add_argument('--Drop_Reminder', dest="Drop_Reminder", type=ast.literal_eval,
+                        help='whether to drop the remainder batch (static shape) or not')
+    parser.add_argument('--save_h5', dest="save_h5", type=ast.literal_eval,
+                        help='whether save h5 file after training')
+    parser.add_argument('--network', default="convolution",
+                        help='train network, only "convolution" or "involution"')
+    #===============================NPU Migration=========================================
+    parser.add_argument("--log_steps", default=50, type=int,
+                        help="TimeHis log Step.")
+    parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                        help='precision mode, default is allow_mix_precision')
+    parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                        help='if or not over detection, default is False')
+    parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                        help='data dump flag, default is False')
+    parser.add_argument('--data_dump_step', default="10",
+                        help='data dump step, default is 10')
+    parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,
+                        help='if or not profiling for performance debug, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data", type=str,
+                        help='the path to save profiling data')
+    parser.add_argument('--over_dump_path', default="/home/data", type=str,
+                        help='the path to save over dump data')
+    parser.add_argument('--data_dump_path', default="/home/data", type=str,
+                        help='the path to save dump data')
+    parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                        help='use_mixlist flag, default is False')
+    parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+                        help='fusion_off flag, default is False')
+    parser.add_argument('--mixlist_file', default="ops_info.json", type=str,
+                        help='mixlist file name, default is ops_info.json')
+    parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,
+                        help='fusion_off file name, default is fusion_switch.cfg')
+    parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,
+                        help='auto_tune flag, default is False')
+    args, unknown_args = parser.parse_known_args()
+    if len(unknown_args) > 0:
+        for bad_arg in unknown_args:
+            print("ERROR: Unknown command line arg: %s" % bad_arg)
+        raise ValueError("Invalid command line arg(s)")
+    return args
+
+args = parse_args()
+
+def npu_config():
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
"task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= +npu_config() + +class Involution(keras.layers.Layer): + def __init__( + self, channel, group_number, kernel_size, stride, reduction_ratio, name + ): + super().__init__(name=name) + + # Initialize the parameters. + self.channel = channel + self.group_number = group_number + self.kernel_size = kernel_size + self.stride = stride + self.reduction_ratio = reduction_ratio + + def build(self, input_shape): + # Get the shape of the input. + (_, height, width, num_channels) = input_shape + + # Scale the height and width with respect to the strides. + height = height // self.stride + width = width // self.stride + + # Define a layer that average pools the input tensor + # if stride is more than 1. + self.stride_layer = ( + keras.layers.AveragePooling2D( + pool_size=self.stride, strides=self.stride, padding="same" + ) + if self.stride > 1 + else tf.identity + ) + # Define the kernel generation layer. + self.kernel_gen = keras.Sequential( + [ + keras.layers.Conv2D( + filters=self.channel // self.reduction_ratio, kernel_size=1 + ), + keras.layers.BatchNormalization(), + keras.layers.ReLU(), + keras.layers.Conv2D( + filters=self.kernel_size * self.kernel_size * self.group_number, + kernel_size=1, + ), + ] + ) + # Define reshape layers + self.kernel_reshape = keras.layers.Reshape( + target_shape=( + height, + width, + self.kernel_size * self.kernel_size, + 1, + self.group_number, + ) + ) + self.input_patches_reshape = keras.layers.Reshape( + target_shape=( + height, + width, + self.kernel_size * self.kernel_size, + num_channels // self.group_number, + self.group_number, + ) + ) + self.output_reshape = keras.layers.Reshape( + target_shape=(height, width, num_channels) + ) + + def call(self, x): + # Generate the kernel with respect to the input tensor. + # B, H, W, K*K*G + kernel_input = self.stride_layer(x) + kernel = self.kernel_gen(kernel_input) + + # reshape the kerenl + # B, H, W, K*K, 1, G + kernel = self.kernel_reshape(kernel) + + # Extract input patches. + # B, H, W, K*K*C + input_patches = tf.image.extract_patches( + images=x, + sizes=[1, self.kernel_size, self.kernel_size, 1], + strides=[1, self.stride, self.stride, 1], + rates=[1, 1, 1, 1], + padding="SAME", + ) + + # Reshape the input patches to align with later operations. + # B, H, W, K*K, C//G, G + input_patches = self.input_patches_reshape(input_patches) + + # Compute the multiply-add operation of kernels and patches. + # B, H, W, K*K, C//G, G + output = tf.multiply(kernel, input_patches) + # B, H, W, C//G, G + output = tf.reduce_sum(output, axis=3) + + # Reshape the output kernel. + # B, H, W, C + output = self.output_reshape(output) + + # Return the output tensor and the kernel. 
+        return output, kernel
+
+class TimeHistory(tf.keras.callbacks.Callback):
+    def __init__(self, batch_size, log_steps, initial_step=0):
+        self.batch_size = batch_size
+        super(TimeHistory, self).__init__()
+        self.steps_before_epoch = initial_step
+        self.last_log_step = initial_step
+        self.log_steps = log_steps
+        self.steps_in_epoch = 0
+        #self.opt = optimizer
+        self.start_time = None
+
+    @property
+    def global_steps(self):
+        """The current 1-indexed global step."""
+        return self.steps_before_epoch + self.steps_in_epoch
+
+    def on_epoch_begin(self, epoch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+        self.epoch_start = time.time()
+
+    def on_batch_begin(self, batch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+
+    def on_batch_end(self, batch, logs=None):
+        self.steps_in_epoch = batch + 1
+        steps_since_last_log = self.global_steps - self.last_log_step
+        if steps_since_last_log >= self.log_steps:
+            now = time.time()
+            elapsed_time = now - self.start_time
+            steps_per_second = steps_since_last_log / elapsed_time
+            examples_per_second = steps_per_second * self.batch_size
+            print(
+                'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
+                'and %d'%(elapsed_time, examples_per_second, self.last_log_step,
+                self.global_steps),flush=True)
+            self.last_log_step = self.global_steps
+            self.start_time = None
+
+    def on_epoch_end(self, epoch, logs=None):
+        epoch_run_time = time.time() - self.epoch_start
+        self.steps_before_epoch += self.steps_in_epoch
+        self.steps_in_epoch = 0
+
+"""
+## Testing the Involution layer
+"""
+# Define the input tensor.
+input_tensor = tf.random.normal((32, 256, 256, 3))
+
+# Compute involution with stride 1.
+output_tensor, _ = Involution(
+    channel=3, group_number=1, kernel_size=5, stride=1, reduction_ratio=1, name="inv_1"
+)(input_tensor)
+print(f"with stride 1 output shape: {output_tensor.shape}")
+
+# Compute involution with stride 2.
+output_tensor, _ = Involution(
+    channel=3, group_number=1, kernel_size=5, stride=2, reduction_ratio=1, name="inv_2"
+)(input_tensor)
+print(f"with stride 2 output shape: {output_tensor.shape}")
+
+# Compute involution with stride 1, channel 16 and reduction ratio 2.
+output_tensor, _ = Involution(
+    channel=16, group_number=1, kernel_size=5, stride=1, reduction_ratio=2, name="inv_3"
+)(input_tensor)
+print(
+    "with channel 16 and reduction ratio 2 output shape: {}".format(output_tensor.shape)
+)
+
+# Load the CIFAR10 dataset.
+# TODO: download CIFAR10 dataset from blue zone and load it from local env.
+def load_data(data_path):
+    num_train_samples = 50000
+    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
+    y_train = np.empty((num_train_samples,), dtype='uint8')
+
+    for i in range(1, 6):
+        fpath = os.path.join(data_path, 'data_batch_' + str(i))
+        (x_train[(i - 1) * 10000:i * 10000, :, :, :],
+         y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
+
+    fpath = os.path.join(data_path, 'test_batch')
+    x_test, y_test = load_batch(fpath)
+
+    y_train = np.reshape(y_train, (len(y_train), 1))
+    y_test = np.reshape(y_test, (len(y_test), 1))
+
+    if K.image_data_format() == 'channels_last':
+        x_train = x_train.transpose(0, 2, 3, 1)
+        x_test = x_test.transpose(0, 2, 3, 1)
+
+    x_test = x_test.astype(x_train.dtype)
+    y_test = y_test.astype(y_train.dtype)
+
+    return (x_train, y_train), (x_test, y_test)
+
+print("loading the CIFAR10 dataset from local path: ", args.data_path)
+(
+    (train_images, train_labels),
+    (test_images, test_labels,),
+) = load_data(args.data_path)  # load_data("/home/hzh/involution/cifar-10-batches-py")
+
+# Normalize pixel values to be between 0 and 1.
+(train_images, test_images) = (train_images / 255.0, test_images / 255.0)
+
+# Shuffle and batch the dataset.
+train_ds = (
+    tf.data.Dataset.from_tensor_slices((train_images, train_labels))
+    .shuffle(args.batch_size)
+    .batch(args.batch_size, drop_remainder=args.Drop_Reminder)
+)
+train_ds = train_ds.prefetch(tf.data.experimental.AUTOTUNE)
+test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(args.batch_size, drop_remainder=args.Drop_Reminder)
+test_ds = test_ds.prefetch(tf.data.experimental.AUTOTUNE)
+
+"""
+## Visualise the data
+"""
+
+class_names = [
+    "airplane",
+    "automobile",
+    "bird",
+    "cat",
+    "deer",
+    "dog",
+    "frog",
+    "horse",
+    "ship",
+    "truck",
+]
+
+from my_adam import My_Adam
+from my_losses import SparseCategoricalCrossentropy
+
+# convolution model
+if args.network == "convolution":
+    # Build the conv model.
+    print("building the convolution model...")
+    conv_model = keras.Sequential(
+        [
+            keras.layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3), padding="same"),
+            keras.layers.ReLU(name="relu1"),
+            keras.layers.MaxPooling2D((2, 2)),
+            keras.layers.Conv2D(64, (3, 3), padding="same"),
+            keras.layers.ReLU(name="relu2"),
+            keras.layers.MaxPooling2D((2, 2)),
+            keras.layers.Conv2D(64, (3, 3), padding="same"),
+            keras.layers.ReLU(name="relu3"),
+            keras.layers.Flatten(),
+            keras.layers.Dense(64, activation="relu"),
+            keras.layers.Dense(10),
+        ]
+    )
+
+
+    # Compile the model with the necessary loss function and optimizer.
+    print("compiling the convolution model...")
+    conv_model.compile(
+        #optimizer="adam",
+        optimizer=My_Adam(learning_rate=0.001),
+        #loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        loss=SparseCategoricalCrossentropy(from_logits=True),
+        metrics=["accuracy"],
+    )
+
+    # Train the model.
+    print("conv model training...")
+    conv_hist = conv_model.fit(train_ds, epochs=args.epochs, validation_data=test_ds, batch_size=args.batch_size, callbacks=TimeHistory(args.batch_size,args.log_steps), verbose=2)
+    if args.save_h5:
+        conv_model.save("model.h5")
+
+# involution model
+if args.network == "involution":
+    # Build the involution model.
+    # print("building the involution model...")
+
+    inputs = keras.Input(shape=(32, 32, 3))
+    x, _ = Involution(channel=3, group_number=1, kernel_size=3, stride=1, reduction_ratio=2, name="inv_1")(inputs)
+    x = keras.layers.ReLU()(x)
+    x = keras.layers.MaxPooling2D((2, 2))(x)
+    x, _ = Involution(channel=3, group_number=1, kernel_size=3, stride=1, reduction_ratio=2, name="inv_2")(x)
+    x = keras.layers.ReLU()(x)
+    x = keras.layers.MaxPooling2D((2, 2))(x)
+    x, _ = Involution(channel=3, group_number=1, kernel_size=3, stride=1, reduction_ratio=2, name="inv_3")(x)
+    x = keras.layers.ReLU()(x)
+    x = keras.layers.Flatten()(x)
+    x = keras.layers.Dense(64, activation="relu")(x)
+    outputs = keras.layers.Dense(10)(x)
+
+    inv_model = keras.Model(inputs=[inputs], outputs=[outputs], name="inv_model")
+
+    # Compile the model with the necessary loss function and optimizer.
+ print("compiling the involution model...") + inv_model.compile( + optimizer="adam", + loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), + metrics=["accuracy"], + ) + + # train the model + print("inv model training...") + inv_hist = inv_model.fit(train_ds, epochs=args.epochs, validation_data=test_ds, batch_size=args.batch_size, callbacks=TimeHistory(args.batch_size,args.log_steps), verbose=2) + if args.save_h5: + inv_hist.save("inv_model.h5") diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..725a45e30 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PercisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_adam.py b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_adam.py new file mode 100644 index 000000000..d25edc623 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_adam.py @@ -0,0 +1,268 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Adam optimizer implementation.""" +# pylint: disable=g-classes-have-attributes + +from tensorflow.python.eager import def_function +from tensorflow.python.framework import ops +# from tensorflow.python.keras import backend_config +# from tensorflow.python.keras.optimizer_v2 import optimizer_v2 +from keras import backend_config +from keras.optimizer_v2 import optimizer_v2 +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import state_ops +from tensorflow.python.training import gen_training_ops +from tensorflow.python.util.tf_export import keras_export +from tensorflow.python.framework import dtypes +from tensorflow.python.ops import variables as tf_variables + +@keras_export('keras.optimizers.My_Adam') +class My_Adam(optimizer_v2.OptimizerV2): + r"""Optimizer that implements the Adam algorithm. + + Adam optimization is a stochastic gradient descent method that is based on + adaptive estimation of first-order and second-order moments. + + According to + [Kingma et al., 2014](http://arxiv.org/abs/1412.6980), + the method is "*computationally + efficient, has little memory requirement, invariant to diagonal rescaling of + gradients, and is well suited for problems that are large in terms of + data/parameters*". + + Args: + learning_rate: A `Tensor`, floating point value, or a schedule that is a + `tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable + that takes no arguments and returns the actual value to use, The + learning rate. 
Defaults to 0.001. + beta_1: A float value or a constant float tensor, or a callable + that takes no arguments and returns the actual value to use. The + exponential decay rate for the 1st moment estimates. Defaults to 0.9. + beta_2: A float value or a constant float tensor, or a callable + that takes no arguments and returns the actual value to use, The + exponential decay rate for the 2nd moment estimates. Defaults to 0.999. + epsilon: A small constant for numerical stability. This epsilon is + "epsilon hat" in the Kingma and Ba paper (in the formula just before + Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults to + 1e-7. + amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm from + the paper "On the Convergence of Adam and beyond". Defaults to `False`. + name: Optional name for the operations created when applying gradients. + Defaults to `"Adam"`. + **kwargs: Keyword arguments. Allowed to be one of + `"clipnorm"` or `"clipvalue"`. + `"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips + gradients by value. + + Usage: + + >>> opt = tf.keras.optimizers.Adam(learning_rate=0.1) + >>> var1 = tf.Variable(10.0) + >>> loss = lambda: (var1 ** 2)/2.0 # d(loss)/d(var1) == var1 + >>> step_count = opt.minimize(loss, [var1]).numpy() + >>> # The first step is `-learning_rate*sign(grad)` + >>> var1.numpy() + 9.9 + + Reference: + - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980) + - [Reddi et al., 2018]( + https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`. + + Notes: + + The default value of 1e-7 for epsilon might not be a good default in + general. For example, when training an Inception network on ImageNet a + current good choice is 1.0 or 0.1. Note that since Adam uses the + formulation just before Section 2.1 of the Kingma and Ba paper rather than + the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon + hat" in the paper. + + The sparse implementation of this algorithm (used when the gradient is an + IndexedSlices object, typically because of `tf.gather` or an embedding + lookup in the forward pass) does apply momentum to variable slices even if + they were not used in the forward pass (meaning they have a gradient equal + to zero). Momentum decay (beta1) is also applied to the entire momentum + accumulator. This means that the sparse behavior is equivalent to the dense + behavior (in contrast to some momentum implementations which ignore momentum + unless a variable slice was actually used). + """ + + _HAS_AGGREGATE_GRAD = True + + def __init__(self, + learning_rate=0.001, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-7, + amsgrad=False, + name='My_Adam', + **kwargs): + super(My_Adam, self).__init__(name, **kwargs) + self._set_hyper('learning_rate', kwargs.get('lr', learning_rate)) + self._set_hyper('decay', self._initial_decay) + self._set_hyper('beta_1', beta_1) + self._set_hyper('beta_2', beta_2) + self.epsilon = epsilon or backend_config.epsilon() + self.amsgrad = amsgrad + + @property + def iterations(self): + """Variable. The number of training steps this Optimizer has run.""" + if self._iterations is None: + with self._distribution_strategy_scope(): + self._iterations = self.add_weight( + "iter", + shape=[], + # dtype=dtypes.int64, + dtype=dtypes.int32, + trainable=False, + aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA) + self._weights.append(self._iterations) + return self._iterations + + def _create_slots(self, var_list): + # Create slots for the first and second moments. 
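+    # For each trainable variable, slot 'm' holds the exponential moving
+    # average of the gradients (first moment) and slot 'v' the moving average
+    # of the squared gradients (second moment):
+    #   m_t = beta_1 * m + (1 - beta_1) * g
+    #   v_t = beta_2 * v + (1 - beta_2) * g^2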
+ # Separate for-loops to respect the ordering of slot variables from v1. + for var in var_list: + self.add_slot(var, 'm') + for var in var_list: + self.add_slot(var, 'v') + if self.amsgrad: + for var in var_list: + self.add_slot(var, 'vhat') + + def _prepare_local(self, var_device, var_dtype, apply_state): + super(My_Adam, self)._prepare_local(var_device, var_dtype, apply_state) + + local_step = math_ops.cast(self.iterations + 1, var_dtype) + beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype)) + beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype)) + beta_1_power = math_ops.pow(beta_1_t, local_step) + beta_2_power = math_ops.pow(beta_2_t, local_step) + lr = (apply_state[(var_device, var_dtype)]['lr_t'] * + (math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power))) + apply_state[(var_device, var_dtype)].update( + dict( + lr=lr, + epsilon=ops.convert_to_tensor_v2_with_dispatch( + self.epsilon, var_dtype), + beta_1_t=beta_1_t, + beta_1_power=beta_1_power, + one_minus_beta_1_t=1 - beta_1_t, + beta_2_t=beta_2_t, + beta_2_power=beta_2_power, + one_minus_beta_2_t=1 - beta_2_t)) + + def set_weights(self, weights): + params = self.weights + # If the weights are generated by Keras V1 optimizer, it includes vhats + # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2 + # optimizer has 2x + 1 variables. Filter vhats out for compatibility. + num_vars = int((len(params) - 1) / 2) + if len(weights) == 3 * num_vars + 1: + weights = weights[:len(params)] + super(My_Adam, self).set_weights(weights) + + def _resource_apply_dense(self, grad, var, apply_state=None): + var_device, var_dtype = var.device, var.dtype.base_dtype + coefficients = ((apply_state or {}).get((var_device, var_dtype)) + or self._fallback_apply_state(var_device, var_dtype)) + + m = self.get_slot(var, 'm') + v = self.get_slot(var, 'v') + + if not self.amsgrad: + return gen_training_ops.ResourceApplyAdam( + var=var.handle, + m=m.handle, + v=v.handle, + beta1_power=coefficients['beta_1_power'], + beta2_power=coefficients['beta_2_power'], + lr=coefficients['lr_t'], + beta1=coefficients['beta_1_t'], + beta2=coefficients['beta_2_t'], + epsilon=coefficients['epsilon'], + grad=grad, + use_locking=self._use_locking) + else: + vhat = self.get_slot(var, 'vhat') + return gen_training_ops.ResourceApplyAdamWithAmsgrad( + var=var.handle, + m=m.handle, + v=v.handle, + vhat=vhat.handle, + beta1_power=coefficients['beta_1_power'], + beta2_power=coefficients['beta_2_power'], + lr=coefficients['lr_t'], + beta1=coefficients['beta_1_t'], + beta2=coefficients['beta_2_t'], + epsilon=coefficients['epsilon'], + grad=grad, + use_locking=self._use_locking) + + def _resource_apply_sparse(self, grad, var, indices, apply_state=None): + var_device, var_dtype = var.device, var.dtype.base_dtype + coefficients = ((apply_state or {}).get((var_device, var_dtype)) + or self._fallback_apply_state(var_device, var_dtype)) + + # m_t = beta1 * m + (1 - beta1) * g_t + m = self.get_slot(var, 'm') + m_scaled_g_values = grad * coefficients['one_minus_beta_1_t'] + m_t = state_ops.assign(m, m * coefficients['beta_1_t'], + use_locking=self._use_locking) + with ops.control_dependencies([m_t]): + m_t = self._resource_scatter_add(m, indices, m_scaled_g_values) + + # v_t = beta2 * v + (1 - beta2) * (g_t * g_t) + v = self.get_slot(var, 'v') + v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t'] + v_t = state_ops.assign(v, v * coefficients['beta_2_t'], + use_locking=self._use_locking) + with ops.control_dependencies([v_t]): + v_t 
= self._resource_scatter_add(v, indices, v_scaled_g_values) + + if not self.amsgrad: + v_sqrt = math_ops.sqrt(v_t) + var_update = state_ops.assign_sub( + var, coefficients['lr'] * m_t / (v_sqrt + coefficients['epsilon']), + use_locking=self._use_locking) + return control_flow_ops.group(*[var_update, m_t, v_t]) + else: + v_hat = self.get_slot(var, 'vhat') + v_hat_t = math_ops.maximum(v_hat, v_t) + with ops.control_dependencies([v_hat_t]): + v_hat_t = state_ops.assign( + v_hat, v_hat_t, use_locking=self._use_locking) + v_hat_sqrt = math_ops.sqrt(v_hat_t) + var_update = state_ops.assign_sub( + var, + coefficients['lr'] * m_t / (v_hat_sqrt + coefficients['epsilon']), + use_locking=self._use_locking) + return control_flow_ops.group(*[var_update, m_t, v_t, v_hat_t]) + + def get_config(self): + config = super(My_Adam, self).get_config() + config.update({ + 'learning_rate': self._serialize_hyperparameter('learning_rate'), + 'decay': self._initial_decay, + 'beta_1': self._serialize_hyperparameter('beta_1'), + 'beta_2': self._serialize_hyperparameter('beta_2'), + 'epsilon': self.epsilon, + 'amsgrad': self.amsgrad, + }) + return config \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_backend.py b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_backend.py new file mode 100644 index 000000000..c544cf203 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_backend.py @@ -0,0 +1,5951 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=protected-access +# pylint: disable=redefined-outer-name +# pylint: disable=redefined-builtin +"""Keras backend API. 
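+
+This file appears to be a vendored copy of the TF1-era `keras.backend` module
+(note the TensorFlow copyright header above), bundled with the sample so it
+can be modified locally without touching the installed TensorFlow.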
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import itertools +import json +import os +import sys +import threading +import weakref + +import numpy as np + +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python import tf2 +from tensorflow.python.client import session as session_module +from tensorflow.python.distribute import distribute_coordinator as dc +from tensorflow.python.distribute import distribute_coordinator_context as dc_context +from tensorflow.python.distribute import distribution_strategy_context +from tensorflow.python.distribute import multi_worker_util +from tensorflow.python.eager import context +from tensorflow.python.eager import function as eager_function +from tensorflow.python.eager import lift_to_graph +from tensorflow.python.framework import composite_tensor +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import device as tfdev +from tensorflow.python.framework import dtypes as dtypes_module +from tensorflow.python.framework import func_graph +from tensorflow.python.framework import ops +from tensorflow.python.framework import sparse_tensor +from tensorflow.python.framework import tensor_util +from tensorflow.python.keras import backend_config +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import clip_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import ctc_ops as ctc +from tensorflow.python.ops import functional_ops +from tensorflow.python.ops import gradients as gradients_module +from tensorflow.python.ops import image_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import logging_ops +from tensorflow.python.ops import map_fn as map_fn_lib +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.ops import state_ops +from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import +from tensorflow.python.ops import tensor_array_ops +from tensorflow.python.ops import variables as variables_module +from tensorflow.python.ops.ragged import ragged_factory_ops +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import nest +from tensorflow.python.util import object_identity +from tensorflow.python.util import tf_contextlib +from tensorflow.python.util import tf_inspect +from tensorflow.python.util.tf_export import keras_export + +py_all = all +py_sum = sum +py_any = any + +# INTERNAL UTILS + +# The internal graph maintained by Keras and used by the symbolic Keras APIs +# while executing eagerly (such as the functional API for model-building). +_GRAPH = None + +# A graph which is used for constructing functions in eager mode. +_CURRENT_SCRATCH_GRAPH = None + +# This is a thread local object that will hold the default internal TF session +# used by Keras. It can be set manually via `set_session(sess)`. +_SESSION = threading.local() + +# This dictionary holds a mapping {graph: learning_phase}. +# A learning phase is a bool tensor used to run Keras models in +# either train mode (learning_phase == 1) or test mode (learning_phase == 0). +_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary() + +# This dictionary holds a mapping {graph: set_of_freezable_variables}. 
+# Each set tracks objects created via `freezable_variable` in the graph. +_FREEZABLE_VARS = weakref.WeakKeyDictionary() + +# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES. +# We keep a separate reference to it to make sure it does not get removed from +# _GRAPH_LEARNING_PHASES. +_DUMMY_EAGER_GRAPH = threading.local() + +# This boolean flag can be set to True to leave variable initialization +# up to the user. +# Change its value via `manual_variable_initialization(value)`. +_MANUAL_VAR_INIT = False + +# This list holds the available devices. +# It is populated when `_get_available_gpus()` is called for the first time. +# We assume our devices don't change henceforth. +_LOCAL_DEVICES = None + +# This dictionary holds a mapping between a graph and variables to initialize +# in the graph. +_GRAPH_VARIABLES = weakref.WeakKeyDictionary() + +# This dictionary holds a mapping between a graph and TF optimizers created in +# the graph. +_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary() + +# The below functions are kept accessible from backend for compatibility. +epsilon = backend_config.epsilon +floatx = backend_config.floatx +image_data_format = backend_config.image_data_format +set_epsilon = backend_config.set_epsilon +set_floatx = backend_config.set_floatx +set_image_data_format = backend_config.set_image_data_format + + +@keras_export('keras.backend.backend') +def backend(): + """Publicly accessible method for determining the current backend. + + Only exists for API compatibility with multi-backend Keras. + + Returns: + The string "tensorflow". + """ + return 'tensorflow' + + +@keras_export('keras.backend.cast_to_floatx') +def cast_to_floatx(x): + """Cast a Numpy array to the default Keras float type. + + Arguments: + x: Numpy array or TensorFlow tensor. + + Returns: + The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor + if `x` was a tensor), cast to its new type. + + Example: + ```python + >>> from tensorflow.keras import backend as K + >>> K.floatx() + 'float32' + >>> arr = numpy.array([1.0, 2.0], dtype='float64') + >>> arr.dtype + dtype('float64') + >>> new_arr = K.cast_to_floatx(arr) + >>> new_arr + array([ 1., 2.], dtype=float32) + >>> new_arr.dtype + dtype('float32') + ``` + """ + if isinstance(x, (ops.Tensor, + variables_module.Variable, + sparse_tensor.SparseTensor)): + return math_ops.cast(x, dtype=floatx()) + return np.asarray(x, dtype=floatx()) + + +# A global dictionary mapping graph objects to an index of counters used +# for various layer/optimizer names in each graph. +# Allows to give unique autogenerated names to layers, in a graph-specific way. +PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary() + + +@keras_export('keras.backend.get_uid') +def get_uid(prefix=''): + """Associates a string prefix with an integer counter in a TensorFlow graph. + + Arguments: + prefix: String prefix to index. + + Returns: + Unique integer ID. + + Example: + + ``` + >>> get_uid('dense') + 1 + >>> get_uid('dense') + 2 + ``` + """ + graph = get_graph() + if graph not in PER_GRAPH_OBJECT_NAME_UIDS: + PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int) + layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph] + layer_name_uids[prefix] += 1 + return layer_name_uids[prefix] + + +@keras_export('keras.backend.reset_uids') +def reset_uids(): + """Resets graph identifiers. + """ + + PER_GRAPH_OBJECT_NAME_UIDS.clear() + + +@keras_export('keras.backend.clear_session') +def clear_session(): + """Destroys the current TF graph and creates a new one. 
+ + Useful to avoid clutter from old models / layers. + """ + global _SESSION + global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned + global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned + global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned + global _GRAPH + global _FREEZABLE_VARS + _GRAPH = None + ops.reset_default_graph() + reset_uids() + _SESSION.session = None + graph = get_graph() + with graph.as_default(): + with name_scope(''): + phase = array_ops.placeholder_with_default( + False, shape=(), name='keras_learning_phase') + _GRAPH_LEARNING_PHASES = {} + _GRAPH_LEARNING_PHASES[graph] = phase + _GRAPH_VARIABLES.pop(graph, None) + _GRAPH_TF_OPTIMIZERS.pop(graph, None) + _FREEZABLE_VARS.pop(graph, None) + + +@keras_export('keras.backend.manual_variable_initialization') +def manual_variable_initialization(value): + """Sets the manual variable initialization flag. + + This boolean flag determines whether + variables should be initialized + as they are instantiated (default), or if + the user should handle the initialization + (e.g. via `tf.compat.v1.initialize_all_variables()`). + + Arguments: + value: Python boolean. + """ + global _MANUAL_VAR_INIT + _MANUAL_VAR_INIT = value + + +@keras_export('keras.backend.learning_phase') +def learning_phase(): + """Returns the learning phase flag. + + The learning phase flag is a bool tensor (0 = test, 1 = train) + to be passed as input to any Keras function + that uses a different behavior at train time and test time. + + Returns: + Learning phase (scalar integer tensor or Python integer). + """ + graph = ops.get_default_graph() + if graph is _GRAPH: + # Don't enter an init_scope for the learning phase if eager execution + # is enabled but we're inside the Keras workspace graph. + learning_phase = symbolic_learning_phase() + _mark_func_graph_as_unsaveable(graph, learning_phase) + return learning_phase + with ops.init_scope(): + # We always check & set the learning phase inside the init_scope, + # otherwise the wrong default_graph will be used to look up the learning + # phase inside of functions & defuns. + # + # This is because functions & defuns (both in graph & in eager mode) + # will always execute non-eagerly using a function-specific default + # subgraph. + if context.executing_eagerly(): + if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES: + # Fallback to inference mode as default. + return 0 + return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] + learning_phase = symbolic_learning_phase() + _mark_func_graph_as_unsaveable(graph, learning_phase) + return learning_phase + + +def global_learning_phase_is_set(): + return _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES + + +def _mark_func_graph_as_unsaveable(graph, learning_phase): + """Mark func graph as unsaveable due to use of symbolic keras learning phase. + + Functions that capture the symbolic learning phase cannot be exported to + SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised + if it is exported. + + Args: + graph: Graph or FuncGraph object. + learning_phase: Learning phase placeholder or int defined in the graph. + """ + if graph.building_function and is_placeholder(learning_phase): + graph.mark_as_unsaveable( + 'The keras learning phase placeholder was used inside a function. ' + 'Exporting placeholders is not supported when saving out a SavedModel. 
' + 'Please call `tf.keras.backend.set_learning_phase(0)` in the function ' + 'to set the learning phase to a constant value.') + + +def symbolic_learning_phase(): + graph = get_graph() + with graph.as_default(): + if graph not in _GRAPH_LEARNING_PHASES: + with name_scope(''): + phase = array_ops.placeholder_with_default( + False, shape=(), name='keras_learning_phase') + _GRAPH_LEARNING_PHASES[graph] = phase + return _GRAPH_LEARNING_PHASES[graph] + + +@keras_export('keras.backend.set_learning_phase') +def set_learning_phase(value): + """Sets the learning phase to a fixed value. + + Arguments: + value: Learning phase value, either 0 or 1 (integers). + + Raises: + ValueError: if `value` is neither `0` nor `1`. + """ + global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned + if value not in {0, 1}: + raise ValueError('Expected learning phase to be 0 or 1.') + with ops.init_scope(): + if context.executing_eagerly(): + # In an eager context, the learning phase values applies to both the eager + # context and the internal Keras graph. + _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value + _GRAPH_LEARNING_PHASES[get_graph()] = value + + +@keras_export('keras.backend.learning_phase_scope') +@tf_contextlib.contextmanager +def learning_phase_scope(value): + """Provides a scope within which the learning phase is equal to `value`. + + The learning phase gets restored to its original value upon exiting the scope. + + Arguments: + value: Learning phase value, either 0 or 1 (integers). + + Yields: + None. + + Raises: + ValueError: if `value` is neither `0` nor `1`. + """ + global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned + if value not in {0, 1}: + raise ValueError('Expected learning phase to be 0 or 1.') + + with ops.init_scope(): + if context.executing_eagerly(): + previous_eager_value = _GRAPH_LEARNING_PHASES.get( + _DUMMY_EAGER_GRAPH, None) + previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None) + + try: + set_learning_phase(value) + yield + finally: + # Restore learning phase to initial value. + with ops.init_scope(): + if context.executing_eagerly(): + if previous_eager_value is not None: + _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value + elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES: + del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] + + graph = get_graph() + if previous_graph_value is not None: + _GRAPH_LEARNING_PHASES[graph] = previous_graph_value + elif graph in _GRAPH_LEARNING_PHASES: + del _GRAPH_LEARNING_PHASES[graph] + + +@tf_contextlib.contextmanager +def eager_learning_phase_scope(value): + """Internal scope that sets the learning phase in eager / tf.function only. + + Arguments: + value: Learning phase value, either 0 or 1 (integers). + + Yields: + None. + + Raises: + ValueError: if `value` is neither `0` nor `1`. + """ + global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned + assert value in {0, 1} + assert ops.executing_eagerly_outside_functions() + global_learning_phase_was_set = global_learning_phase_is_set() + if global_learning_phase_was_set: + previous_value = learning_phase() + try: + _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value + yield + finally: + # Restore learning phase to initial value or unset. 
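+    # If a phase had been set globally before entering the scope, re-install
+    # it; otherwise drop the temporary entry so that learning_phase() falls
+    # back to its eager default, inference mode (0).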
+ if global_learning_phase_was_set: + _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value + else: + del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] + + +def _current_graph(op_input_list): + """Return the graph members of `op_input_list`, or the current graph.""" + return ops._get_graph_from_inputs(op_input_list) + + +def _get_session(op_input_list=()): + """Returns the session object for the current thread.""" + global _SESSION + default_session = ops.get_default_session() + if default_session is not None: + session = default_session + else: + if ops.inside_function(): + raise RuntimeError('Cannot get session inside Tensorflow graph function.') + # If we don't have a session, or that session does not match the current + # graph, create and cache a new session. + if (getattr(_SESSION, 'session', None) is None or + _SESSION.session.graph is not _current_graph(op_input_list)): + # If we are creating the Session inside a tf.distribute.Strategy scope, + # we ask the strategy for the right session options to use. + if distribution_strategy_context.has_strategy(): + configure_and_create_distributed_session( + distribution_strategy_context.get_strategy()) + else: + _SESSION.session = session_module.Session( + config=get_default_session_config()) + session = _SESSION.session + return session + + +@keras_export(v1=['keras.backend.get_session']) +def get_session(op_input_list=()): + """Returns the TF session to be used by the backend. + + If a default TensorFlow session is available, we will return it. + + Else, we will return the global Keras session assuming it matches + the current graph. + + If no global Keras session exists at this point: + we will create a new global session. + + Note that you can manually set the global session + via `K.set_session(sess)`. + + Arguments: + op_input_list: An option sequence of tensors or ops, which will be used + to determine the current graph. Otherwise the default graph will be + used. + + Returns: + A TensorFlow session. + """ + session = _get_session(op_input_list) + if not _MANUAL_VAR_INIT: + with session.graph.as_default(): + _initialize_variables(session) + return session + + +def get_graph(): + if context.executing_eagerly(): + global _GRAPH + if _GRAPH is None: + _GRAPH = func_graph.FuncGraph('keras_graph') + return _GRAPH + else: + return ops.get_default_graph() + + +@tf_contextlib.contextmanager +def _scratch_graph(graph=None): + """Retrieve a shared and temporary func graph. + + The eager execution path lifts a subgraph from the keras global graph into + a scratch graph in order to create a function. DistributionStrategies, in + turn, constructs multiple functions as well as a final combined function. In + order for that logic to work correctly, all of the functions need to be + created on the same scratch FuncGraph. + + Args: + graph: A graph to be used as the current scratch graph. If not set then + a scratch graph will either be retrieved or created: + + Yields: + The current scratch graph. + """ + global _CURRENT_SCRATCH_GRAPH + if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and + _CURRENT_SCRATCH_GRAPH is not graph): + raise ValueError('Multiple scratch graphs specified.') + + if _CURRENT_SCRATCH_GRAPH: + yield _CURRENT_SCRATCH_GRAPH + return + + graph = graph or func_graph.FuncGraph('keras_scratch_graph') + try: + _CURRENT_SCRATCH_GRAPH = graph + yield graph + finally: + _CURRENT_SCRATCH_GRAPH = None + + +@keras_export(v1=['keras.backend.set_session']) +def set_session(session): + """Sets the global TensorFlow session. 
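+
+  The session installed here is stored thread-locally and is what
+  `get_session()` returns afterwards for a matching graph (a session installed
+  as the TF default session still takes precedence, see `get_session` above).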
+ + Arguments: + session: A TF Session. + """ + global _SESSION + _SESSION.session = session + + +def get_default_session_config(): + if os.environ.get('OMP_NUM_THREADS'): + logging.warning( + 'OMP_NUM_THREADS is no longer used by the default Keras config. ' + 'To configure the number of threads, use tf.config.threading APIs.') + + config = context.context().config + config.allow_soft_placement = True + + return config + + +def get_default_graph_uid_map(): + graph = ops.get_default_graph() + name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None) + if name_uid_map is None: + name_uid_map = collections.defaultdict(int) + PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map + return name_uid_map + + +# DEVICE MANIPULATION + + +class _TfDeviceCaptureOp(object): + """Class for capturing the TF device scope.""" + + def __init__(self): + self.device = None + + def _set_device(self, device): + """This method captures TF's explicit device scope setting.""" + if tfdev.is_device_spec(device): + device = device.to_string() + self.device = device + + def _set_device_from_string(self, device_str): + self.device = device_str + + +def _get_current_tf_device(): + """Return explicit device of current context, otherwise returns `None`. + + Returns: + If the current device scope is explicitly set, it returns a string with + the device (`CPU` or `GPU`). If the scope is not explicitly set, it will + return `None`. + """ + graph = get_graph() + op = _TfDeviceCaptureOp() + graph._apply_device_functions(op) + return tfdev.DeviceSpec.from_string(op.device) + + +def _is_current_explicit_device(device_type): + """Check if the current device is explicitly set on the device type specified. + + Arguments: + device_type: A string containing `GPU` or `CPU` (case-insensitive). + + Returns: + A boolean indicating if the current device scope is explicitly set on the + device type. + + Raises: + ValueError: If the `device_type` string indicates an unsupported device. + """ + device_type = device_type.upper() + if device_type not in ['CPU', 'GPU']: + raise ValueError('`device_type` should be either "CPU" or "GPU".') + device = _get_current_tf_device() + return device is not None and device.device_type == device_type.upper() + + +def _get_available_gpus(): + """Get a list of available gpu devices (formatted as strings). + + Returns: + A list of available GPU devices. + """ + if ops.executing_eagerly_outside_functions(): + # Returns names of devices directly. + return [name for name in context.list_devices() if 'GPU' in name] + + global _LOCAL_DEVICES + if _LOCAL_DEVICES is None: + _LOCAL_DEVICES = get_session().list_devices() + return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU'] + + +def _has_nchw_support(): + """Check whether the current scope supports NCHW ops. + + TensorFlow does not support NCHW on CPU. Therefore we check if we are not + explicitly put on + CPU, and have GPUs available. In this case there will be soft-placing on the + GPU device. + + Returns: + bool: if the current scope device placement would support nchw + """ + explicitly_on_cpu = _is_current_explicit_device('CPU') + gpus_available = bool(_get_available_gpus()) + return not explicitly_on_cpu and gpus_available + + +# VARIABLE MANIPULATION + + +def _constant_to_tensor(x, dtype): + """Convert the input `x` to a tensor of type `dtype`. + + This is slightly faster than the _to_tensor function, at the cost of + handling fewer cases. + + Arguments: + x: An object to be converted (numpy arrays, floats, ints and lists of + them). 
+ dtype: The destination type. + + Returns: + A tensor. + """ + return constant_op.constant(x, dtype=dtype) + + +def _to_tensor(x, dtype): + """Convert the input `x` to a tensor of type `dtype`. + + Arguments: + x: An object to be converted (numpy array, list, tensors). + dtype: The destination type. + + Returns: + A tensor. + """ + return ops.convert_to_tensor(x, dtype=dtype) + + +@keras_export('keras.backend.is_sparse') +def is_sparse(tensor): + """Returns whether a tensor is a sparse tensor. + + Arguments: + tensor: A tensor instance. + + Returns: + A boolean. + + Example: + ```python + >>> from keras import backend as K + >>> a = K.placeholder((2, 2), sparse=False) + >>> print(K.is_sparse(a)) + False + >>> b = K.placeholder((2, 2), sparse=True) + >>> print(K.is_sparse(b)) + True + ``` + """ + return isinstance(tensor, sparse_tensor.SparseTensor) + + +@keras_export('keras.backend.to_dense') +def to_dense(tensor): + """Converts a sparse tensor into a dense tensor and returns it. + + Arguments: + tensor: A tensor instance (potentially sparse). + + Returns: + A dense tensor. + + Examples: + ```python + >>> from keras import backend as K + >>> b = K.placeholder((2, 2), sparse=True) + >>> print(K.is_sparse(b)) + True + >>> c = K.to_dense(b) + >>> print(K.is_sparse(c)) + False + ``` + """ + if is_sparse(tensor): + return sparse_ops.sparse_tensor_to_dense(tensor) + else: + return tensor + + +@keras_export('keras.backend.name_scope', v1=[]) +def name_scope(name): + """A context manager for use when defining a Python op. + + This context manager pushes a name scope, which will make the name of all + operations added within it have a prefix. + + For example, to define a new Python op called `my_op`: + + ```python + def my_op(a): + with tf.name_scope("MyOp") as scope: + a = tf.convert_to_tensor(a, name="a") + # Define some computation that uses `a`. + return foo_op(..., name=scope) + ``` + + When executed, the Tensor `a` will have the name `MyOp/a`. + + Args: + name: The prefix to use on all names created within the name scope. + + Returns: + Name scope context manager. + """ + return ops.name_scope_v2(name) + + +@keras_export('keras.backend.variable') +def variable(value, dtype=None, name=None, constraint=None): + """Instantiates a variable and returns it. + + Arguments: + value: Numpy array, initial value of the tensor. + dtype: Tensor type. + name: Optional name string for the tensor. + constraint: Optional projection function to be + applied to the variable after an optimizer update. + + Returns: + A variable instance (with Keras metadata included). 
+ + Examples: + ```python + >>> import numpy as np + >>> from keras import backend as K + >>> val = np.array([[1, 2], [3, 4]]) + >>> kvar = K.variable(value=val, dtype='float64', name='example_var') + >>> K.dtype(kvar) + 'float64' + >>> print(kvar) + example_var + >>> kvar.eval() + array([[ 1., 2.], + [ 3., 4.]]) + ``` + """ + if dtype is None: + dtype = floatx() + if hasattr(value, 'tocoo'): + sparse_coo = value.tocoo() + indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims( + sparse_coo.col, 1)), 1) + v = sparse_tensor.SparseTensor( + indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape) + v._keras_shape = sparse_coo.shape + return v + v = variables_module.Variable( + value, + dtype=dtypes_module.as_dtype(dtype), + name=name, + constraint=constraint) + if isinstance(value, np.ndarray): + v._keras_shape = value.shape + elif hasattr(value, 'shape'): + v._keras_shape = int_shape(value) + track_variable(v) + return v + + +def track_tf_optimizer(tf_optimizer): + """Tracks the given TF optimizer for initialization of its variables.""" + if context.executing_eagerly(): + return + graph = get_graph() + optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet()) + optimizers.add(tf_optimizer) + + +def track_variable(v): + """Tracks the given variable for initialization.""" + if context.executing_eagerly(): + return + graph = v.graph if hasattr(v, 'graph') else get_graph() + if graph not in _GRAPH_VARIABLES: + _GRAPH_VARIABLES[graph] = object_identity.ObjectIdentityWeakSet() + _GRAPH_VARIABLES[graph].add(v) + + +def unique_object_name(name, + name_uid_map=None, + avoid_names=None, + namespace='', + zero_based=False): + """Makes a object name (or arbitrary string) unique within a TensorFlow graph. + + Arguments: + name: String name to make unique. + name_uid_map: An optional defaultdict(int) to use when creating unique + names. If None (default), uses a per-Graph dictionary. + avoid_names: An optional set or dict with names which should not be used. If + None (default) does not avoid any names. + namespace: Gets a name which is unique within the (graph, namespace). Layers + which are not Networks use a blank namespace and so get graph-global + names. + zero_based: If True, name sequences start with no suffix (e.g. "dense", + "dense_1"). If False, naming is one-based ("dense_1", "dense_2"). + + Returns: + Unique string name. 
+ + Example: + + ```python + _unique_layer_name('dense') # dense_1 + _unique_layer_name('dense') # dense_2 + ``` + """ + if name_uid_map is None: + name_uid_map = get_default_graph_uid_map() + if avoid_names is None: + avoid_names = set() + proposed_name = None + while proposed_name is None or proposed_name in avoid_names: + name_key = (namespace, name) + if zero_based: + number = name_uid_map[name_key] + if number: + proposed_name = name + '_' + str(number) + else: + proposed_name = name + name_uid_map[name_key] += 1 + else: + name_uid_map[name_key] += 1 + proposed_name = name + '_' + str(name_uid_map[name_key]) + return proposed_name + + +def _get_variables(graph=None): + """Returns variables corresponding to the given graph for initialization.""" + assert not context.executing_eagerly() + variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet()) + for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()): + variables.update(opt.optimizer.variables()) + return variables + + +def _initialize_variables(session): + """Utility to initialize uninitialized variables on the fly.""" + variables = _get_variables(get_graph()) + candidate_vars = [] + for v in variables: + if not getattr(v, '_keras_initialized', False): + candidate_vars.append(v) + if candidate_vars: + # This step is expensive, so we only run it on variables not already + # marked as initialized. + is_initialized = session.run( + [variables_module.is_variable_initialized(v) for v in candidate_vars]) + uninitialized_vars = [] + for flag, v in zip(is_initialized, candidate_vars): + if not flag: + uninitialized_vars.append(v) + v._keras_initialized = True + if uninitialized_vars: + session.run(variables_module.variables_initializer(uninitialized_vars)) + + +@keras_export('keras.backend.constant') +def constant(value, dtype=None, shape=None, name=None): + """Creates a constant tensor. + + Arguments: + value: A constant value (or list) + dtype: The type of the elements of the resulting tensor. + shape: Optional dimensions of resulting tensor. + name: Optional name for the tensor. + + Returns: + A Constant Tensor. + """ + if dtype is None: + dtype = floatx() + + return constant_op.constant(value, dtype=dtype, shape=shape, name=name) + + +@keras_export('keras.backend.is_keras_tensor') +def is_keras_tensor(x): + """Returns whether `x` is a Keras tensor. + + A "Keras tensor" is a tensor that was returned by a Keras layer, + (`Layer` class) or by `Input`. + + Arguments: + x: A candidate tensor. + + Returns: + A boolean: Whether the argument is a Keras tensor. + + Raises: + ValueError: In case `x` is not a symbolic tensor. + + Examples: + ```python + >>> import tensorflow as tf + >>> import numpy + >>> from keras import backend as K + >>> from keras.layers import Input, Dense + >>> np_var = numpy.array([1, 2]) + >>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor. + ValueError + >>> k_var = tf.compat.v1.placeholder('float32', shape=(1,1)) + >>> K.is_keras_tensor(k_var) # A variable indirectly created outside of + keras is not a Keras tensor. + False + >>> keras_var = K.variable(np_var) + >>> K.is_keras_tensor(keras_var) # A variable created with the keras + backend is not a Keras tensor. + False + >>> keras_placeholder = K.placeholder(shape=(2, 4, 5)) + >>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras + tensor. + False + >>> keras_input = Input([10]) + >>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor. 
+ True + >>> keras_layer_output = Dense(10)(keras_input) + >>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a + Keras tensor. + True + ``` + """ + if not isinstance(x, (ops.Tensor, + variables_module.Variable, + sparse_tensor.SparseTensor)): + raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + + '`. Expected a symbolic tensor instance.') + return hasattr(x, '_keras_history') + + +@keras_export('keras.backend.placeholder') +def placeholder(shape=None, + ndim=None, + dtype=None, + sparse=False, + name=None, + ragged=False): + """Instantiates a placeholder tensor and returns it. + + Arguments: + shape: Shape of the placeholder + (integer tuple, may include `None` entries). + ndim: Number of axes of the tensor. + At least one of {`shape`, `ndim`} must be specified. + If both are specified, `shape` is used. + dtype: Placeholder type. + sparse: Boolean, whether the placeholder should have a sparse type. + name: Optional name string for the placeholder. + ragged: Boolean, whether the placeholder should have a ragged type. + In this case, values of 'None' in the 'shape' argument represent + ragged dimensions. For more information about RaggedTensors, see this + [guide](https://www.tensorflow.org/guide/ragged_tensors). + + Raises: + ValueError: If called with eager execution + ValueError: If called with sparse = True and ragged = True. + + Returns: + Tensor instance (with Keras metadata included). + + Examples: + ```python + >>> from keras import backend as K + >>> input_ph = K.placeholder(shape=(2, 4, 5)) + >>> input_ph + + ``` + """ + if sparse and ragged: + raise ValueError( + 'Cannot set both sparse and ragged to True when creating a placeholder.' + ) + + if dtype is None: + dtype = floatx() + if not shape: + if ndim: + shape = tuple([None for _ in range(ndim)]) + with get_graph().as_default(): + if sparse: + x = array_ops.sparse_placeholder(dtype, shape=shape, name=name) + elif ragged: + ragged_rank = 0 + for i in range(1, len(shape)): + if shape[i] is None: + ragged_rank += 1 + else: + break + value_shape = shape[(ragged_rank + 1):] + + x = ragged_factory_ops.placeholder( + dtype=dtype, + ragged_rank=ragged_rank, + value_shape=value_shape, + name=name) + else: + x = array_ops.placeholder(dtype, shape=shape, name=name) + return x + + +def is_placeholder(x): + """Returns whether `x` is a placeholder. + + Arguments: + x: A candidate placeholder. + + Returns: + Boolean. + """ + try: + if isinstance(x, composite_tensor.CompositeTensor): + flat_components = nest.flatten(x, expand_composites=True) + return py_any(is_placeholder(c) for c in flat_components) + else: + return x.op.type == 'Placeholder' + except AttributeError: + return False + + +def freezable_variable(value, shape=None, name=None): + """A tensor-like object whose value can be updated only up until execution. + + After creating the freezable variable, you can update its value by calling + `var.update_value(new_value)` (similar to a regular variable). + Unlike an actual variable, the value used during execution is the current + value at the time the execution function (`backend.function()`) was created. + + This is an internal API, expected to be temporary. It is used to implement a + mutable `trainable` property for `BatchNormalization` layers, with a frozen + value after model compilation. + + We don't use a plain variable in this case because we need the value used + in a specific model to be frozen after `compile` has been called + (e.g. GAN use case). 
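+
+  Sketch of the resulting behavior: execution functions compiled *before* a
+  call to `x.update_value(new_value)` keep using the value captured at their
+  creation, while functions compiled afterwards see `new_value`.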
+ + Arguments: + value: The initial value for the tensor-like object. + shape: The shape for the tensor-like object (cannot be changed). + name: The name for the tensor-like object. + + Returns: + A tensor-like object with a static value that can be updated via + `x.update_value(new_value)`, up until creating an execution function + (afterwards the value is fixed). + """ + graph = get_graph() + with graph.as_default(): + x = array_ops.placeholder_with_default( + value, shape=shape, name=name) + x._initial_value = value + x._current_value = value + + def update_value(new_value): + x._current_value = new_value + + def get_value(): + return x._current_value + + x.update_value = update_value + x.get_value = get_value + + global _FREEZABLE_VARS + if graph not in _FREEZABLE_VARS: + _FREEZABLE_VARS[graph] = object_identity.ObjectIdentityWeakSet() + _FREEZABLE_VARS[graph].add(x) + return x + + +@keras_export('keras.backend.shape') +def shape(x): + """Returns the symbolic shape of a tensor or variable. + + Arguments: + x: A tensor or variable. + + Returns: + A symbolic shape (which is itself a tensor). + + Examples: + + ```python + # TensorFlow example + >>> from keras import backend as K + >>> tf_session = K.get_session() + >>> val = np.array([[1, 2], [3, 4]]) + >>> kvar = K.variable(value=val) + >>> input = keras.backend.placeholder(shape=(2, 4, 5)) + >>> K.shape(kvar) + + >>> K.shape(input) + + # To get integer shape (Instead, you can use K.int_shape(x)) + >>> K.shape(kvar).eval(session=tf_session) + array([2, 2], dtype=int32) + >>> K.shape(input).eval(session=tf_session) + array([2, 4, 5], dtype=int32) + ``` + """ + return array_ops.shape(x) + + +@keras_export('keras.backend.int_shape') +def int_shape(x): + """Returns the shape of tensor or variable as a tuple of int or None entries. + + Arguments: + x: Tensor or variable. + + Returns: + A tuple of integers (or None entries). + + Examples: + ```python + >>> from keras import backend as K + >>> input = K.placeholder(shape=(2, 4, 5)) + >>> K.int_shape(input) + (2, 4, 5) + >>> val = np.array([[1, 2], [3, 4]]) + >>> kvar = K.variable(value=val) + >>> K.int_shape(kvar) + (2, 2) + ``` + """ + try: + shape = x.shape + if not isinstance(shape, tuple): + shape = tuple(shape.as_list()) + return shape + except ValueError: + return None + + +@keras_export('keras.backend.ndim') +def ndim(x): + """Returns the number of axes in a tensor, as an integer. + + Arguments: + x: Tensor or variable. + + Returns: + Integer (scalar), number of axes. + + Examples: + ```python + >>> from keras import backend as K + >>> input = K.placeholder(shape=(2, 4, 5)) + >>> val = np.array([[1, 2], [3, 4]]) + >>> kvar = K.variable(value=val) + >>> K.ndim(input) + 3 + >>> K.ndim(kvar) + 2 + ``` + """ + dims = x.shape._dims + if dims is not None: + return len(dims) + return None + + +@keras_export('keras.backend.dtype') +def dtype(x): + """Returns the dtype of a Keras tensor or variable, as a string. + + Arguments: + x: Tensor or variable. + + Returns: + String, dtype of `x`. 
+ + Examples: + ```python + >>> from keras import backend as K + >>> K.dtype(K.placeholder(shape=(2,4,5))) + 'float32' + >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32')) + 'float32' + >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64')) + 'float64' + # Keras variable + >>> kvar = K.variable(np.array([[1, 2], [3, 4]])) + >>> K.dtype(kvar) + 'float32' + >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32') + >>> K.dtype(kvar) + 'float32' + ``` + """ + return x.dtype.base_dtype.name + + +@keras_export('keras.backend.eval') +def eval(x): + """Evaluates the value of a variable. + + Arguments: + x: A variable. + + Returns: + A Numpy array. + + Examples: + ```python + >>> from keras import backend as K + >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32') + >>> K.eval(kvar) + array([[ 1., 2.], + [ 3., 4.]], dtype=float32) + ``` + """ + return get_value(to_dense(x)) + + +@keras_export('keras.backend.zeros') +def zeros(shape, dtype=None, name=None): + """Instantiates an all-zeros variable and returns it. + + Arguments: + shape: Tuple or list of integers, shape of returned Keras variable + dtype: data type of returned Keras variable + name: name of returned Keras variable + + Returns: + A variable (including Keras metadata), filled with `0.0`. + Note that if `shape` was symbolic, we cannot return a variable, + and will return a dynamically-shaped tensor instead. + + Example: + + ```python + from tensorflow.keras import backend as K + kvar = K.zeros((3,4)) + K.eval(kvar) + # array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], + # [ 0., 0., 0., 0.]], dtype=float32) + A = tf.constant([1,2,3]) + kvar2 = K.zeros(A.shape) # [0., 0., 0.] float32 by default + kvar3 = K.zeros(A.shape,dtype=tf.int32) # [0, 0, 0] with int32 dtype + kvar4 = K.zeros([2,3]) # [[0., 0., 0.], [0., 0., 0.]] + ``` + + """ + with ops.init_scope(): + if dtype is None: + dtype = floatx() + tf_dtype = dtypes_module.as_dtype(dtype) + v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name) + if py_all(v.shape.as_list()): + return variable(v, dtype=dtype, name=name) + track_variable(v) + return v + + +@keras_export('keras.backend.ones') +def ones(shape, dtype=None, name=None): + """Instantiates an all-ones variable and returns it. + + Arguments: + shape: Tuple of integers, shape of returned Keras variable. + dtype: String, data type of returned Keras variable. + name: String, name of returned Keras variable. + + Returns: + A Keras variable, filled with `1.0`. + Note that if `shape` was symbolic, we cannot return a variable, + and will return a dynamically-shaped tensor instead. + + Example: + ```python + >>> from keras import backend as K + >>> kvar = K.ones((3,4)) + >>> K.eval(kvar) + array([[ 1., 1., 1., 1.], + [ 1., 1., 1., 1.], + [ 1., 1., 1., 1.]], dtype=float32) + ``` + """ + with ops.init_scope(): + if dtype is None: + dtype = floatx() + tf_dtype = dtypes_module.as_dtype(dtype) + v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name) + if py_all(v.shape.as_list()): + return variable(v, dtype=dtype, name=name) + track_variable(v) + return v + + +@keras_export('keras.backend.eye') +def eye(size, dtype=None, name=None): + """Instantiate an identity matrix and returns it. + + Arguments: + size: Integer, number of rows/columns. + dtype: String, data type of returned Keras variable. + name: String, name of returned Keras variable. + + Returns: + A Keras variable, an identity matrix. 
+ + Example: + ```python + >>> from keras import backend as K + >>> kvar = K.eye(3) + >>> K.eval(kvar) + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]], dtype=float32) + ``` + + """ + if dtype is None: + dtype = floatx() + tf_dtype = dtypes_module.as_dtype(dtype) + return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name) + + +@keras_export('keras.backend.zeros_like') +def zeros_like(x, dtype=None, name=None): + """Instantiates an all-zeros variable of the same shape as another tensor. + + Arguments: + x: Keras variable or Keras tensor. + dtype: dtype of returned Keras variable. + `None` uses the dtype of `x`. + name: name for the variable to create. + + Returns: + A Keras variable with the shape of `x` filled with zeros. + + Example: + + ```python + from tensorflow.keras import backend as K + kvar = K.variable(np.random.random((2,3))) + kvar_zeros = K.zeros_like(kvar) + K.eval(kvar_zeros) + # array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) + ``` + + """ + return array_ops.zeros_like(x, dtype=dtype, name=name) + + +@keras_export('keras.backend.ones_like') +def ones_like(x, dtype=None, name=None): + """Instantiates an all-ones variable of the same shape as another tensor. + + Arguments: + x: Keras variable or tensor. + dtype: String, dtype of returned Keras variable. + None uses the dtype of x. + name: String, name for the variable to create. + + Returns: + A Keras variable with the shape of x filled with ones. + + Example: + ```python + >>> from keras import backend as K + >>> kvar = K.variable(np.random.random((2,3))) + >>> kvar_ones = K.ones_like(kvar) + >>> K.eval(kvar_ones) + array([[ 1., 1., 1.], + [ 1., 1., 1.]], dtype=float32) + ``` + """ + return array_ops.ones_like(x, dtype=dtype, name=name) + + +def identity(x, name=None): + """Returns a tensor with the same content as the input tensor. + + Arguments: + x: The input tensor. + name: String, name for the variable to create. + + Returns: + A tensor of the same shape, type and content. + """ + return array_ops.identity(x, name=name) + + +@keras_export('keras.backend.random_uniform_variable') +def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): + """Instantiates a variable with values drawn from a uniform distribution. + + Arguments: + shape: Tuple of integers, shape of returned Keras variable. + low: Float, lower boundary of the output interval. + high: Float, upper boundary of the output interval. + dtype: String, dtype of returned Keras variable. + name: String, name of returned Keras variable. + seed: Integer, random seed. + + Returns: + A Keras variable, filled with drawn samples. + + Example: + ```python + # TensorFlow example + >>> kvar = K.random_uniform_variable((2,3), 0, 1) + >>> kvar + + >>> K.eval(kvar) + array([[ 0.10940075, 0.10047495, 0.476143 ], + [ 0.66137183, 0.00869417, 0.89220798]], dtype=float32) + ``` + """ + if dtype is None: + dtype = floatx() + tf_dtype = dtypes_module.as_dtype(dtype) + if seed is None: + # ensure that randomness is conditioned by the Numpy RNG + seed = np.random.randint(10e8) + value = init_ops.random_uniform_initializer( + low, high, dtype=tf_dtype, seed=seed)(shape) + return variable(value, dtype=dtype, name=name) + + +@keras_export('keras.backend.random_normal_variable') +def random_normal_variable(shape, mean, scale, dtype=None, name=None, + seed=None): + """Instantiates a variable with values drawn from a normal distribution. + + Arguments: + shape: Tuple of integers, shape of returned Keras variable. 
+ mean: Float, mean of the normal distribution. + scale: Float, standard deviation of the normal distribution. + dtype: String, dtype of returned Keras variable. + name: String, name of returned Keras variable. + seed: Integer, random seed. + + Returns: + A Keras variable, filled with drawn samples. + + Example: + ```python + # TensorFlow example + >>> kvar = K.random_normal_variable((2,3), 0, 1) + >>> kvar + + >>> K.eval(kvar) + array([[ 1.19591331, 0.68685907, -0.63814116], + [ 0.92629528, 0.28055015, 1.70484698]], dtype=float32) + ``` + """ + if dtype is None: + dtype = floatx() + tf_dtype = dtypes_module.as_dtype(dtype) + if seed is None: + # ensure that randomness is conditioned by the Numpy RNG + seed = np.random.randint(10e8) + value = init_ops.random_normal_initializer( + mean, scale, dtype=tf_dtype, seed=seed)(shape) + return variable(value, dtype=dtype, name=name) + + +@keras_export('keras.backend.count_params') +def count_params(x): + """Returns the static number of elements in a variable or tensor. + + Arguments: + x: Variable or tensor. + + Returns: + Integer, the number of scalars in `x`. + + Example: + ```python + >>> kvar = K.zeros((2,3)) + >>> K.count_params(kvar) + 6 + >>> K.eval(kvar) + array([[ 0., 0., 0.], + [ 0., 0., 0.]], dtype=float32) + ``` + """ + return np.prod(x.shape.as_list()) + + +@keras_export('keras.backend.cast') +def cast(x, dtype): + """Casts a tensor to a different dtype and returns it. + + You can cast a Keras variable but it still returns a Keras tensor. + + Arguments: + x: Keras tensor (or variable). + dtype: String, either (`'float16'`, `'float32'`, or `'float64'`). + + Returns: + Keras tensor with dtype `dtype`. + + Examples: + Cast a float32 variable to a float64 tensor + + ```python + >>> import tensorflow as tf + >>> from tensorflow.keras import backend as K + >>> input = K.ones(shape=(1,3)) + >>> print(input) + >>> cast_input = K.cast(input, dtype='float64') + >>> print(cast_input) + + + tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64) + ``` + """ + return math_ops.cast(x, dtype) + + +# UPDATES OPS + + +@keras_export('keras.backend.update') +def update(x, new_x): + return state_ops.assign(x, new_x) + + +@keras_export('keras.backend.update_add') +def update_add(x, increment): + """Update the value of `x` by adding `increment`. + + Arguments: + x: A Variable. + increment: A tensor of same shape as `x`. + + Returns: + The variable `x` updated. + """ + return state_ops.assign_add(x, increment) + + +@keras_export('keras.backend.update_sub') +def update_sub(x, decrement): + """Update the value of `x` by subtracting `decrement`. + + Arguments: + x: A Variable. + decrement: A tensor of same shape as `x`. + + Returns: + The variable `x` updated. + """ + return state_ops.assign_sub(x, decrement) + + +@keras_export('keras.backend.moving_average_update') +def moving_average_update(x, value, momentum): + """Compute the moving average of a variable. + + Arguments: + x: A Variable. + value: A tensor with the same shape as `variable`. + momentum: The moving average momentum. + + Returns: + An Operation to update the variable. + """ + # `training` is higher-up than the Keras backend in the abstraction hierarchy. + # In particular, `training` depends on layers, and thus on Keras. + # moving_averages, being low-level ops, should not be part of the training + # module. 
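+  # The resulting update is, in effect:
+  #   x := x - (1 - momentum) * (x - value)
+  #      == momentum * x + (1 - momentum) * value
+  # (zero-debiased under TF1 semantics, see `zero_debias` below).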
+ from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top + zero_debias = not tf2.enabled() + return moving_averages.assign_moving_average( + x, value, momentum, zero_debias=zero_debias) + + +# LINEAR ALGEBRA + + +@keras_export('keras.backend.dot') +def dot(x, y): + """Multiplies 2 tensors (and/or variables) and returns a *tensor*. + + When attempting to multiply a nD tensor + with a nD tensor, it reproduces the Theano behavior. + (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`) + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A tensor, dot product of `x` and `y`. + + Examples: + ```python + # dot product between tensors + >>> x = K.placeholder(shape=(2, 3)) + >>> y = K.placeholder(shape=(3, 4)) + >>> xy = K.dot(x, y) + >>> xy + + ``` + + ```python + # dot product between tensors + >>> x = K.placeholder(shape=(32, 28, 3)) + >>> y = K.placeholder(shape=(3, 4)) + >>> xy = K.dot(x, y) + >>> xy + + ``` + + ```python + # Theano-like behavior example + >>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1) + >>> y = K.ones((4, 3, 5)) + >>> xy = K.dot(x, y) + >>> K.int_shape(xy) + (2, 4, 5) + ``` + """ + if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2): + x_shape = [] + for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))): + if i is not None: + x_shape.append(i) + else: + x_shape.append(s) + x_shape = tuple(x_shape) + y_shape = [] + for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))): + if i is not None: + y_shape.append(i) + else: + y_shape.append(s) + y_shape = tuple(y_shape) + y_permute_dim = list(range(ndim(y))) + y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim + xt = array_ops.reshape(x, [-1, x_shape[-1]]) + yt = array_ops.reshape( + array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) + return array_ops.reshape( + math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) + if is_sparse(x): + out = sparse_ops.sparse_tensor_dense_matmul(x, y) + else: + out = math_ops.matmul(x, y) + return out + + +@keras_export('keras.backend.batch_dot') +def batch_dot(x, y, axes=None): + """Batchwise dot product. + + `batch_dot` is used to compute dot product of `x` and `y` when + `x` and `y` are data in batch, i.e. in a shape of + `(batch_size, :)`. + `batch_dot` results in a tensor or variable with less dimensions + than the input. If the number of dimensions is reduced to 1, + we use `expand_dims` to make sure that ndim is at least 2. + + Arguments: + x: Keras tensor or variable with `ndim >= 2`. + y: Keras tensor or variable with `ndim >= 2`. + axes: Tuple or list of integers with target dimensions, or single integer. + The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal. + + Returns: + A tensor with shape equal to the concatenation of `x`'s shape + (less the dimension that was summed over) and `y`'s shape + (less the batch dimension and the dimension that was summed over). + If the final rank is 1, we reshape it to `(batch_size, 1)`. + + Examples: + Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]` + `batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal + of `x.dot(y.T)`, although we never have to calculate the off-diagonal + elements. + + Pseudocode: + ``` + inner_products = [] + for xi, yi in zip(x, y): + inner_products.append(xi.dot(yi)) + result = stack(inner_products) + ``` + + Shape inference: + Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`. 
+ If `axes` is (1, 2), to find the output shape of resultant tensor, + loop through each dimension in `x`'s shape and `y`'s shape: + * `x.shape[0]` : 100 : append to output shape + * `x.shape[1]` : 20 : do not append to output shape, + dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1) + * `y.shape[0]` : 100 : do not append to output shape, + always ignore first dimension of `y` + * `y.shape[1]` : 30 : append to output shape + * `y.shape[2]` : 20 : do not append to output shape, + dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2) + `output_shape` = `(100, 30)` + + >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1)) + >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20)) + >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2)) + >>> tf.keras.backend.int_shape(xy_batch_dot) + (32, 1, 30) + """ + x_shape = int_shape(x) + y_shape = int_shape(y) + + x_ndim = len(x_shape) + y_ndim = len(y_shape) + + if x_ndim < 2 or y_ndim < 2: + raise ValueError('Cannot do batch_dot on inputs ' + 'with rank < 2. ' + 'Received inputs with shapes ' + + str(x_shape) + ' and ' + + str(y_shape) + '.') + + x_batch_size = x_shape[0] + y_batch_size = y_shape[0] + + if x_batch_size is not None and y_batch_size is not None: + if x_batch_size != y_batch_size: + raise ValueError('Cannot do batch_dot on inputs ' + 'with different batch sizes. ' + 'Received inputs with shapes ' + + str(x_shape) + ' and ' + + str(y_shape) + '.') + if isinstance(axes, int): + axes = [axes, axes] + + if axes is None: + if y_ndim == 2: + axes = [x_ndim - 1, y_ndim - 1] + else: + axes = [x_ndim - 1, y_ndim - 2] + + if py_any([isinstance(a, (list, tuple)) for a in axes]): + raise ValueError('Multiple target dimensions are not supported. ' + + 'Expected: None, int, (int, int), ' + + 'Provided: ' + str(axes)) + + # if tuple, convert to list. + axes = list(axes) + + # convert negative indices. + if axes[0] < 0: + axes[0] += x_ndim + if axes[1] < 0: + axes[1] += y_ndim + + # sanity checks + if 0 in axes: + raise ValueError('Cannot perform batch_dot over axis 0. ' + 'If your inputs are not batched, ' + 'add a dummy batch dimension to your ' + 'inputs using K.expand_dims(x, 0)') + a0, a1 = axes + d1 = x_shape[a0] + d2 = y_shape[a1] + + if d1 is not None and d2 is not None and d1 != d2: + raise ValueError('Cannot do batch_dot on inputs with shapes ' + + str(x_shape) + ' and ' + str(y_shape) + + ' with axes=' + str(axes) + '. x.shape[%d] != ' + 'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2)) + + # backup ndims. Need them later. + orig_x_ndim = x_ndim + orig_y_ndim = y_ndim + + # if rank is 2, expand to 3. + if x_ndim == 2: + x = array_ops.expand_dims(x, 1) + a0 += 1 + x_ndim += 1 + if y_ndim == 2: + y = array_ops.expand_dims(y, 2) + y_ndim += 1 + + # bring x's dimension to be reduced to last axis. + if a0 != x_ndim - 1: + pattern = list(range(x_ndim)) + for i in range(a0, x_ndim - 1): + pattern[i] = pattern[i + 1] + pattern[-1] = a0 + x = array_ops.transpose(x, pattern) + + # bring y's dimension to be reduced to axis 1. + if a1 != 1: + pattern = list(range(y_ndim)) + for i in range(a1, 1, -1): + pattern[i] = pattern[i - 1] + pattern[1] = a1 + y = array_ops.transpose(y, pattern) + + # normalize both inputs to rank 3. + if x_ndim > 3: + # squash middle dimensions of x. 
+ x_shape = shape(x) + x_mid_dims = x_shape[1:-1] + x_squashed_shape = array_ops.stack( + [x_shape[0], -1, x_shape[-1]]) + x = array_ops.reshape(x, x_squashed_shape) + x_squashed = True + else: + x_squashed = False + + if y_ndim > 3: + # squash trailing dimensions of y. + y_shape = shape(y) + y_trail_dims = y_shape[2:] + y_squashed_shape = array_ops.stack( + [y_shape[0], y_shape[1], -1]) + y = array_ops.reshape(y, y_squashed_shape) + y_squashed = True + else: + y_squashed = False + + result = math_ops.matmul(x, y) + + # if inputs were squashed, we have to reshape the matmul output. + output_shape = array_ops.shape(result) + do_reshape = False + + if x_squashed: + output_shape = array_ops.concat( + [output_shape[:1], + x_mid_dims, + output_shape[-1:]], 0) + do_reshape = True + + if y_squashed: + output_shape = array_ops.concat([output_shape[:-1], y_trail_dims], 0) + do_reshape = True + + if do_reshape: + result = array_ops.reshape(result, output_shape) + + # if the inputs were originally rank 2, we remove the added 1 dim. + if orig_x_ndim == 2: + result = array_ops.squeeze(result, 1) + elif orig_y_ndim == 2: + result = array_ops.squeeze(result, -1) + + return result + + +@keras_export('keras.backend.transpose') +def transpose(x): + """Transposes a tensor and returns it. + + Arguments: + x: Tensor or variable. + + Returns: + A tensor. + + Examples: + ```python + >>> var = K.variable([[1, 2, 3], [4, 5, 6]]) + >>> K.eval(var) + array([[ 1., 2., 3.], + [ 4., 5., 6.]], dtype=float32) + >>> var_transposed = K.transpose(var) + >>> K.eval(var_transposed) + array([[ 1., 4.], + [ 2., 5.], + [ 3., 6.]], dtype=float32) + ``` + + ```python + >>> input = K.placeholder((2, 3)) + >>> input + + >>> input_transposed = K.transpose(input) + >>> input_transposed + + + ``` + """ + return array_ops.transpose(x) + + +@keras_export('keras.backend.gather') +def gather(reference, indices): + """Retrieves the elements of indices `indices` in the tensor `reference`. + + Arguments: + reference: A tensor. + indices: An integer tensor of indices. + + Returns: + A tensor of same type as `reference`. + """ + return array_ops.gather(reference, indices) + + +# ELEMENT-WISE OPERATIONS + + +@keras_export('keras.backend.max') +def max(x, axis=None, keepdims=False): + """Maximum value in a tensor. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to find maximum values. + keepdims: A boolean, whether to keep the dimensions or not. + If `keepdims` is `False`, the rank of the tensor is reduced + by 1. If `keepdims` is `True`, + the reduced dimension is retained with length 1. + + Returns: + A tensor with maximum values of `x`. + """ + return math_ops.reduce_max(x, axis, keepdims) + + +@keras_export('keras.backend.min') +def min(x, axis=None, keepdims=False): + """Minimum value in a tensor. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to find minimum values. + keepdims: A boolean, whether to keep the dimensions or not. + If `keepdims` is `False`, the rank of the tensor is reduced + by 1. If `keepdims` is `True`, + the reduced dimension is retained with length 1. + + Returns: + A tensor with minimum values of `x`. + """ + return math_ops.reduce_min(x, axis, keepdims) + + +@keras_export('keras.backend.sum') +def sum(x, axis=None, keepdims=False): + """Sum of the values in a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to sum over. + keepdims: A boolean, whether to keep the dimensions or not. 
+ If `keepdims` is `False`, the rank of the tensor is reduced + by 1. If `keepdims` is `True`, + the reduced dimension is retained with length 1. + + Returns: + A tensor with sum of `x`. + """ + return math_ops.reduce_sum(x, axis, keepdims) + + +@keras_export('keras.backend.prod') +def prod(x, axis=None, keepdims=False): + """Multiplies the values in a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to compute the product. + keepdims: A boolean, whether to keep the dimensions or not. + If `keepdims` is `False`, the rank of the tensor is reduced + by 1. If `keepdims` is `True`, + the reduced dimension is retained with length 1. + + Returns: + A tensor with the product of elements of `x`. + """ + return math_ops.reduce_prod(x, axis, keepdims) + + +@keras_export('keras.backend.cumsum') +def cumsum(x, axis=0): + """Cumulative sum of the values in a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to compute the sum. + + Returns: + A tensor of the cumulative sum of values of `x` along `axis`. + """ + return math_ops.cumsum(x, axis=axis) + + +@keras_export('keras.backend.cumprod') +def cumprod(x, axis=0): + """Cumulative product of the values in a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to compute the product. + + Returns: + A tensor of the cumulative product of values of `x` along `axis`. + """ + return math_ops.cumprod(x, axis=axis) + + +@keras_export('keras.backend.var') +def var(x, axis=None, keepdims=False): + """Variance of a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to compute the variance. + keepdims: A boolean, whether to keep the dimensions or not. + If `keepdims` is `False`, the rank of the tensor is reduced + by 1. If `keepdims` is `True`, + the reduced dimension is retained with length 1. + + Returns: + A tensor with the variance of elements of `x`. + """ + if x.dtype.base_dtype == dtypes_module.bool: + x = math_ops.cast(x, floatx()) + return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims) + + +@keras_export('keras.backend.std') +def std(x, axis=None, keepdims=False): + """Standard deviation of a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: An integer, the axis to compute the standard deviation. + keepdims: A boolean, whether to keep the dimensions or not. + If `keepdims` is `False`, the rank of the tensor is reduced + by 1. If `keepdims` is `True`, + the reduced dimension is retained with length 1. + + Returns: + A tensor with the standard deviation of elements of `x`. + """ + if x.dtype.base_dtype == dtypes_module.bool: + x = math_ops.cast(x, floatx()) + return math_ops.reduce_std(x, axis=axis, keepdims=keepdims) + + +@keras_export('keras.backend.mean') +def mean(x, axis=None, keepdims=False): + """Mean of a tensor, alongside the specified axis. + + Arguments: + x: A tensor or variable. + axis: A list of integer. Axes to compute the mean. + keepdims: A boolean, whether to keep the dimensions or not. + If `keepdims` is `False`, the rank of the tensor is reduced + by 1 for each entry in `axis`. If `keepdims` is `True`, + the reduced dimensions are retained with length 1. + + Returns: + A tensor with the mean of elements of `x`. 
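+  Example (editor's sketch, assuming eager TF 2.x with `K = tf.keras.backend`):
+  ```python
+      >>> x = K.constant([[1., 2.], [3., 4.]])
+      >>> K.eval(K.mean(x))                       # mean over all entries
+      2.5
+      >>> K.eval(K.mean(x, axis=0))               # column-wise mean
+      array([2., 3.], dtype=float32)
+      >>> K.eval(K.mean(x, axis=1, keepdims=True))
+      array([[1.5],
+             [3.5]], dtype=float32)
+  ```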
+  """
+  if x.dtype.base_dtype == dtypes_module.bool:
+    x = math_ops.cast(x, floatx())
+  return math_ops.reduce_mean(x, axis, keepdims)
+
+
+@keras_export('keras.backend.any')
+def any(x, axis=None, keepdims=False):
+  """Bitwise reduction (logical OR).
+
+  Arguments:
+      x: Tensor or variable.
+      axis: axis along which to perform the reduction.
+      keepdims: whether to drop or broadcast the reduction axes.
+
+  Returns:
+      A bool tensor.
+  """
+  x = math_ops.cast(x, dtypes_module.bool)
+  return math_ops.reduce_any(x, axis, keepdims)
+
+
+@keras_export('keras.backend.all')
+def all(x, axis=None, keepdims=False):
+  """Bitwise reduction (logical AND).
+
+  Arguments:
+      x: Tensor or variable.
+      axis: axis along which to perform the reduction.
+      keepdims: whether to drop or broadcast the reduction axes.
+
+  Returns:
+      A bool tensor.
+  """
+  x = math_ops.cast(x, dtypes_module.bool)
+  return math_ops.reduce_all(x, axis, keepdims)
+
+
+@keras_export('keras.backend.argmax')
+def argmax(x, axis=-1):
+  """Returns the index of the maximum value along an axis.
+
+  Arguments:
+      x: Tensor or variable.
+      axis: axis along which to perform the reduction.
+
+  Returns:
+      A tensor.
+  """
+  return math_ops.argmax(x, axis)
+
+
+@keras_export('keras.backend.argmin')
+def argmin(x, axis=-1):
+  """Returns the index of the minimum value along an axis.
+
+  Arguments:
+      x: Tensor or variable.
+      axis: axis along which to perform the reduction.
+
+  Returns:
+      A tensor.
+  """
+  return math_ops.argmin(x, axis)
+
+
+@keras_export('keras.backend.square')
+def square(x):
+  """Element-wise square.
+
+  Arguments:
+      x: Tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  return math_ops.square(x)
+
+
+@keras_export('keras.backend.abs')
+def abs(x):
+  """Element-wise absolute value.
+
+  Arguments:
+      x: Tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  return math_ops.abs(x)
+
+
+@keras_export('keras.backend.sqrt')
+def sqrt(x):
+  """Element-wise square root.
+
+  Negative entries of `x` are clipped to zero before the square root is
+  taken, so the result never contains `nan`.
+
+  Arguments:
+      x: Tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  zero = _constant_to_tensor(0., x.dtype.base_dtype)
+  inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
+  x = clip_ops.clip_by_value(x, zero, inf)
+  return math_ops.sqrt(x)
+
+
+@keras_export('keras.backend.exp')
+def exp(x):
+  """Element-wise exponential.
+
+  Arguments:
+      x: Tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  return math_ops.exp(x)
+
+
+@keras_export('keras.backend.log')
+def log(x):
+  """Element-wise natural logarithm.
+
+  Arguments:
+      x: Tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  return math_ops.log(x)
+
+
+def logsumexp(x, axis=None, keepdims=False):
+  """Computes log(sum(exp(elements across dimensions of a tensor))).
+
+  This function is more numerically stable than log(sum(exp(x))).
+  It avoids overflows caused by taking the exp of large inputs and
+  underflows caused by taking the log of small inputs.
+
+  Arguments:
+      x: A tensor or variable.
+      axis: An integer, the axis to reduce over.
+      keepdims: A boolean, whether to keep the dimensions or not.
+          If `keepdims` is `False`, the rank of the tensor is reduced
+          by 1. If `keepdims` is `True`, the reduced dimension is
+          retained with length 1.
+
+  Returns:
+      The reduced tensor.
+  """
+  return math_ops.reduce_logsumexp(x, axis, keepdims)
+
+
+@keras_export('keras.backend.round')
+def round(x):
+  """Element-wise rounding to the closest integer.
+
+  In case of tie, the rounding mode used is "half to even".
+
+  Arguments:
+      x: Tensor or variable.
+
+  Returns:
+      A tensor.
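+  Example (editor's sketch, assuming eager TF 2.x with `K = tf.keras.backend`):
+  ```python
+      >>> x = K.constant([0.5, 1.5, 2.3, -0.5])
+      >>> K.eval(K.round(x))   # ties go to the nearest even integer
+      array([ 0.,  2.,  2., -0.], dtype=float32)
+  ```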
+ """ + return math_ops.round(x) + + +@keras_export('keras.backend.sign') +def sign(x): + """Element-wise sign. + + Arguments: + x: Tensor or variable. + + Returns: + A tensor. + """ + return math_ops.sign(x) + + +@keras_export('keras.backend.pow') +def pow(x, a): + """Element-wise exponentiation. + + Arguments: + x: Tensor or variable. + a: Python integer. + + Returns: + A tensor. + """ + return math_ops.pow(x, a) + + +@keras_export('keras.backend.clip') +def clip(x, min_value, max_value): + """Element-wise value clipping. + + Arguments: + x: Tensor or variable. + min_value: Python float or integer. + max_value: Python float or integer. + + Returns: + A tensor. + """ + if max_value is not None and max_value < min_value: + max_value = min_value + if max_value is None: + max_value = np.inf + min_value = _constant_to_tensor(min_value, x.dtype.base_dtype) + max_value = _constant_to_tensor(max_value, x.dtype.base_dtype) + return clip_ops.clip_by_value(x, min_value, max_value) + + +@keras_export('keras.backend.equal') +def equal(x, y): + """Element-wise equality between two tensors. + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A bool tensor. + """ + return math_ops.equal(x, y) + + +@keras_export('keras.backend.not_equal') +def not_equal(x, y): + """Element-wise inequality between two tensors. + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A bool tensor. + """ + return math_ops.not_equal(x, y) + + +@keras_export('keras.backend.greater') +def greater(x, y): + """Element-wise truth value of (x > y). + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A bool tensor. + """ + return math_ops.greater(x, y) + + +@keras_export('keras.backend.greater_equal') +def greater_equal(x, y): + """Element-wise truth value of (x >= y). + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A bool tensor. + """ + return math_ops.greater_equal(x, y) + + +@keras_export('keras.backend.less') +def less(x, y): + """Element-wise truth value of (x < y). + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A bool tensor. + """ + return math_ops.less(x, y) + + +@keras_export('keras.backend.less_equal') +def less_equal(x, y): + """Element-wise truth value of (x <= y). + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A bool tensor. + """ + return math_ops.less_equal(x, y) + + +@keras_export('keras.backend.maximum') +def maximum(x, y): + """Element-wise maximum of two tensors. + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A tensor with the element wise maximum value(s) of `x` and `y`. + + Examples: + ```python + # maximum of two tensors + >>> x = tf.Variable([[1, 2], [3, 4]]) + >>> y = tf.Variable([[2, 1], [0, -1]]) + >>> m = tf.keras.backend.maximum(x, y) + >>> m + + ``` + """ + return math_ops.maximum(x, y) + + +@keras_export('keras.backend.minimum') +def minimum(x, y): + """Element-wise minimum of two tensors. + + Arguments: + x: Tensor or variable. + y: Tensor or variable. + + Returns: + A tensor. + """ + return math_ops.minimum(x, y) + + +@keras_export('keras.backend.sin') +def sin(x): + """Computes sin of x element-wise. + + Arguments: + x: Tensor or variable. + + Returns: + A tensor. + """ + return math_ops.sin(x) + + +@keras_export('keras.backend.cos') +def cos(x): + """Computes cos of x element-wise. + + Arguments: + x: Tensor or variable. + + Returns: + A tensor. 
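+  Example (editor's sketch, assuming eager TF 2.x with `K = tf.keras.backend`):
+  ```python
+      >>> import numpy as np
+      >>> x = K.constant([0., np.pi / 2., np.pi])
+      >>> K.eval(K.cos(x))   # ~[1., 0., -1.] up to float32 rounding
+  ```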
+ """ + return math_ops.cos(x) + + +def _regular_normalize_batch_in_training(x, + gamma, + beta, + reduction_axes, + epsilon=1e-3): + """Non-fused version of `normalize_batch_in_training`. + + Arguments: + x: Input tensor or variable. + gamma: Tensor by which to scale the input. + beta: Tensor with which to center the input. + reduction_axes: iterable of integers, + axes over which to normalize. + epsilon: Fuzz factor. + + Returns: + A tuple length of 3, `(normalized_tensor, mean, variance)`. + """ + mean, var = nn.moments(x, reduction_axes, None, None, False) + normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon) + return normed, mean, var + + +def _broadcast_normalize_batch_in_training(x, + gamma, + beta, + reduction_axes, + epsilon=1e-3): + """Non-fused, broadcast version of `normalize_batch_in_training`. + + Arguments: + x: Input tensor or variable. + gamma: Tensor by which to scale the input. + beta: Tensor with which to center the input. + reduction_axes: iterable of integers, + axes over which to normalize. + epsilon: Fuzz factor. + + Returns: + A tuple length of 3, `(normalized_tensor, mean, variance)`. + """ + mean, var = nn.moments(x, reduction_axes, None, None, False) + target_shape = [] + for axis in range(ndim(x)): + if axis in reduction_axes: + target_shape.append(1) + else: + target_shape.append(array_ops.shape(x)[axis]) + target_shape = array_ops.stack(target_shape) + + broadcast_mean = array_ops.reshape(mean, target_shape) + broadcast_var = array_ops.reshape(var, target_shape) + if gamma is None: + broadcast_gamma = None + else: + broadcast_gamma = array_ops.reshape(gamma, target_shape) + if beta is None: + broadcast_beta = None + else: + broadcast_beta = array_ops.reshape(beta, target_shape) + + normed = nn.batch_normalization(x, broadcast_mean, broadcast_var, + broadcast_beta, broadcast_gamma, epsilon) + return normed, mean, var + + +def _fused_normalize_batch_in_training(x, + gamma, + beta, + reduction_axes, + epsilon=1e-3): + """Fused version of `normalize_batch_in_training`. + + Arguments: + x: Input tensor or variable. + gamma: Tensor by which to scale the input. + beta: Tensor with which to center the input. + reduction_axes: iterable of integers, + axes over which to normalize. + epsilon: Fuzz factor. + + Returns: + A tuple length of 3, `(normalized_tensor, mean, variance)`. + """ + if list(reduction_axes) == [0, 1, 2]: + normalization_axis = 3 + tf_data_format = 'NHWC' + else: + normalization_axis = 1 + tf_data_format = 'NCHW' + + if gamma is None: + gamma = constant_op.constant( + 1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]]) + if beta is None: + beta = constant_op.constant( + 0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]]) + + return nn.fused_batch_norm( + x, gamma, beta, epsilon=epsilon, data_format=tf_data_format) + + +@keras_export('keras.backend.normalize_batch_in_training') +def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): + """Computes mean and std for batch then apply batch_normalization on batch. + + Arguments: + x: Input tensor or variable. + gamma: Tensor by which to scale the input. + beta: Tensor with which to center the input. + reduction_axes: iterable of integers, + axes over which to normalize. + epsilon: Fuzz factor. + + Returns: + A tuple length of 3, `(normalized_tensor, mean, variance)`. 
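+  Example (editor's sketch, assuming eager TF 2.x with `K = tf.keras.backend`;
+  normalizing a rank-2 batch over axis 0):
+  ```python
+      >>> x = K.random_normal((10, 5))
+      >>> gamma, beta = K.ones((5,)), K.zeros((5,))
+      >>> normed, mean, var = K.normalize_batch_in_training(
+      ...     x, gamma, beta, reduction_axes=[0])
+      >>> K.int_shape(normed), K.int_shape(mean), K.int_shape(var)
+      ((10, 5), (5,), (5,))
+  ```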
+ """ + if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]: + if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]: + return _broadcast_normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=epsilon) + return _fused_normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=epsilon) + else: + if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: + return _regular_normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=epsilon) + else: + return _broadcast_normalize_batch_in_training( + x, gamma, beta, reduction_axes, epsilon=epsilon) + + +@keras_export('keras.backend.batch_normalization') +def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3): + """Applies batch normalization on x given mean, var, beta and gamma. + + I.e. returns: + `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` + + Arguments: + x: Input tensor or variable. + mean: Mean of batch. + var: Variance of batch. + beta: Tensor with which to center the input. + gamma: Tensor by which to scale the input. + axis: Integer, the axis that should be normalized. + (typically the features axis). + epsilon: Fuzz factor. + + Returns: + A tensor. + """ + if ndim(x) == 4: + # The CPU implementation of `fused_batch_norm` only supports NHWC + if axis == 1 or axis == -3: + tf_data_format = 'NCHW' + elif axis == 3 or axis == -1: + tf_data_format = 'NHWC' + else: + tf_data_format = None + + if (tf_data_format == 'NHWC' or + tf_data_format == 'NCHW' and _has_nchw_support()): + # The mean / var / beta / gamma tensors may be broadcasted + # so they may have extra axes of size 1, which should be squeezed. + if ndim(mean) > 1: + mean = array_ops.reshape(mean, [-1]) + if ndim(var) > 1: + var = array_ops.reshape(var, [-1]) + if beta is None: + beta = zeros_like(mean) + elif ndim(beta) > 1: + beta = array_ops.reshape(beta, [-1]) + if gamma is None: + gamma = ones_like(mean) + elif ndim(gamma) > 1: + gamma = array_ops.reshape(gamma, [-1]) + y, _, _ = nn.fused_batch_norm( + x, + gamma, + beta, + epsilon=epsilon, + mean=mean, + variance=var, + data_format=tf_data_format, + is_training=False + ) + return y + return nn.batch_normalization(x, mean, var, beta, gamma, epsilon) + + +# SHAPE OPERATIONS + + +@keras_export('keras.backend.concatenate') +def concatenate(tensors, axis=-1): + """Concatenates a list of tensors alongside the specified axis. + + Arguments: + tensors: list of tensors to concatenate. + axis: concatenation axis. + + Returns: + A tensor. + + Example: + ```python + >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]]) + >>> tf.keras.backend.concatenate((a, b), axis=-1) + + ``` + """ + if axis < 0: + rank = ndim(tensors[0]) + if rank: + axis %= rank + else: + axis = 0 + + if py_all(is_sparse(x) for x in tensors): + return sparse_ops.sparse_concat(axis, tensors) + else: + return array_ops.concat([to_dense(x) for x in tensors], axis) + + +@keras_export('keras.backend.reshape') +def reshape(x, shape): + """Reshapes a tensor to the specified shape. + + Arguments: + x: Tensor or variable. + shape: Target shape tuple. + + Returns: + A tensor. + + Example: + ```python + >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + >>> a + + >>> tf.keras.backend.reshape(a, shape=(2, 6)) + + ``` + """ + return array_ops.reshape(x, shape) + + +@keras_export('keras.backend.permute_dimensions') +def permute_dimensions(x, pattern): + """Permutes axes in a tensor. 
+ + Arguments: + x: Tensor or variable. + pattern: A tuple of + dimension indices, e.g. `(0, 2, 1)`. + + Returns: + A tensor. + + Example: + ```python + >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + >>> a + + >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0)) + + ``` + """ + return array_ops.transpose(x, perm=pattern) + + +@keras_export('keras.backend.resize_images') +def resize_images(x, height_factor, width_factor, data_format, + interpolation='nearest'): + """Resizes the images contained in a 4D tensor. + + Arguments: + x: Tensor or variable to resize. + height_factor: Positive integer. + width_factor: Positive integer. + data_format: One of `"channels_first"`, `"channels_last"`. + interpolation: A string, one of `nearest` or `bilinear`. + + Returns: + A tensor. + + Raises: + ValueError: in case of incorrect value for + `data_format` or `interpolation`. + """ + if data_format == 'channels_first': + rows, cols = 2, 3 + elif data_format == 'channels_last': + rows, cols = 1, 2 + else: + raise ValueError('Invalid `data_format` argument: %s' % (data_format,)) + + original_shape = int_shape(x) + new_shape = array_ops.shape(x)[rows:cols + 1] + new_shape *= constant_op.constant( + np.array([height_factor, width_factor], dtype='int32')) + + if data_format == 'channels_first': + x = permute_dimensions(x, [0, 2, 3, 1]) + if interpolation == 'nearest': + x = image_ops.resize_images_v2( + x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR) + elif interpolation == 'bilinear': + x = image_ops.resize_images_v2(x, new_shape, + method=image_ops.ResizeMethod.BILINEAR) + else: + raise ValueError('interpolation should be one ' + 'of "nearest" or "bilinear".') + if data_format == 'channels_first': + x = permute_dimensions(x, [0, 3, 1, 2]) + + if original_shape[rows] is None: + new_height = None + else: + new_height = original_shape[rows] * height_factor + + if original_shape[cols] is None: + new_width = None + else: + new_width = original_shape[cols] * width_factor + + if data_format == 'channels_first': + output_shape = (None, None, new_height, new_width) + else: + output_shape = (None, new_height, new_width, None) + x.set_shape(output_shape) + return x + + +@keras_export('keras.backend.resize_volumes') +def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): + """Resizes the volume contained in a 5D tensor. + + Arguments: + x: Tensor or variable to resize. + depth_factor: Positive integer. + height_factor: Positive integer. + width_factor: Positive integer. + data_format: One of `"channels_first"`, `"channels_last"`. + + Returns: + A tensor. + + Raises: + ValueError: if `data_format` is neither + `channels_last` or `channels_first`. + """ + if data_format == 'channels_first': + output = repeat_elements(x, depth_factor, axis=2) + output = repeat_elements(output, height_factor, axis=3) + output = repeat_elements(output, width_factor, axis=4) + return output + elif data_format == 'channels_last': + output = repeat_elements(x, depth_factor, axis=1) + output = repeat_elements(output, height_factor, axis=2) + output = repeat_elements(output, width_factor, axis=3) + return output + else: + raise ValueError('Invalid data_format: ' + str(data_format)) + + +@keras_export('keras.backend.repeat_elements') +def repeat_elements(x, rep, axis): + """Repeats the elements of a tensor along an axis, like `np.repeat`. + + If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output + will have shape `(s1, s2 * rep, s3)`. + + Arguments: + x: Tensor or variable. 
+ rep: Python integer, number of times to repeat. + axis: Axis along which to repeat. + + Returns: + A tensor. + + Example: + ```python + >>> b = tf.constant([1, 2, 3]) + >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0) + + ``` + """ + x_shape = x.shape.as_list() + # For static axis + if x_shape[axis] is not None: + # slices along the repeat axis + splits = array_ops.split(value=x, + num_or_size_splits=x_shape[axis], + axis=axis) + # repeat each slice the given number of reps + x_rep = [s for s in splits for _ in range(rep)] + return concatenate(x_rep, axis) + + # Here we use tf.tile to mimic behavior of np.repeat so that + # we can handle dynamic shapes (that include None). + # To do that, we need an auxiliary axis to repeat elements along + # it and then merge them along the desired axis. + + # Repeating + auxiliary_axis = axis + 1 + x_shape = array_ops.shape(x) + x_rep = array_ops.expand_dims(x, axis=auxiliary_axis) + reps = np.ones(len(x.shape) + 1) + reps[auxiliary_axis] = rep + x_rep = array_ops.tile(x_rep, reps) + + # Merging + reps = np.delete(reps, auxiliary_axis) + reps[axis] = rep + reps = array_ops.constant(reps, dtype='int32') + x_shape *= reps + x_rep = array_ops.reshape(x_rep, x_shape) + + # Fix shape representation + x_shape = x.shape.as_list() + x_rep.set_shape(x_shape) + x_rep._keras_shape = tuple(x_shape) + return x_rep + + +@keras_export('keras.backend.repeat') +def repeat(x, n): + """Repeats a 2D tensor. + + if `x` has shape (samples, dim) and `n` is `2`, + the output will have shape `(samples, 2, dim)`. + + Arguments: + x: Tensor or variable. + n: Python integer, number of times to repeat. + + Returns: + A tensor. + + Example: + ```python + >>> b = tf.constant([[1, 2], [3, 4]]) + >>> b + + >>> tf.keras.backend.repeat(b, n=2) + + ``` + """ + assert ndim(x) == 2 + x = array_ops.expand_dims(x, 1) + pattern = array_ops.stack([1, n, 1]) + return array_ops.tile(x, pattern) + + +@keras_export('keras.backend.arange') +def arange(start, stop=None, step=1, dtype='int32'): + """Creates a 1D tensor containing a sequence of integers. + + The function arguments use the same convention as + Theano's arange: if only one argument is provided, + it is in fact the "stop" argument and "start" is 0. + + The default type of the returned tensor is `'int32'` to + match TensorFlow's default. + + Arguments: + start: Start value. + stop: Stop value. + step: Difference between two successive values. + dtype: Integer dtype to use. + + Returns: + An integer tensor. + + Example: + ```python + >>> tf.keras.backend.arange(start=0, stop=10, step=1.5) + + + ``` + + """ + # Match the behavior of numpy and Theano by returning an empty sequence. + if stop is None and start < 0: + start = 0 + result = math_ops.range(start, limit=stop, delta=step, name='arange') + if dtype != 'int32': + result = cast(result, dtype) + return result + + +@keras_export('keras.backend.tile') +def tile(x, n): + """Creates a tensor by tiling `x` by `n`. + + Arguments: + x: A tensor or variable + n: A list of integer. The length must be the same as the number of + dimensions in `x`. + + Returns: + A tiled tensor. + """ + if isinstance(n, int): + n = [n] + return array_ops.tile(x, n) + + +@keras_export('keras.backend.flatten') +def flatten(x): + """Flatten a tensor. + + Arguments: + x: A tensor or variable. 
+ + Returns: + A tensor, reshaped into 1-D + + Example: + ```python + >>> b = tf.constant([[1, 2], [3, 4]]) + >>> b + + >>> tf.keras.backend.flatten(b) + + ``` + """ + return array_ops.reshape(x, [-1]) + + +@keras_export('keras.backend.batch_flatten') +def batch_flatten(x): + """Turn a nD tensor into a 2D tensor with same 0th dimension. + + In other words, it flattens each data samples of a batch. + + Arguments: + x: A tensor or variable. + + Returns: + A tensor. + + Examples: + Flattening a 3D tensor to 2D by collapsing the last dimension. + + ```python + >>> from tensorflow.keras import backend as K + >>> x_batch = K.ones(shape=(2, 3, 4, 5)) + >>> x_batch_flatten = K.batch_flatten(x_batch) + >>> K.int_shape(x_batch_flatten) + (2, 60) + ``` + """ + x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])])) + return x + + +@keras_export('keras.backend.expand_dims') +def expand_dims(x, axis=-1): + """Adds a 1-sized dimension at index "axis". + + Arguments: + x: A tensor or variable. + axis: Position where to add a new axis. + + Returns: + A tensor with expanded dimensions. + """ + return array_ops.expand_dims(x, axis) + + +@keras_export('keras.backend.squeeze') +def squeeze(x, axis): + """Removes a 1-dimension from the tensor at index "axis". + + Arguments: + x: A tensor or variable. + axis: Axis to drop. + + Returns: + A tensor with the same data as `x` but reduced dimensions. + """ + return array_ops.squeeze(x, [axis]) + + +@keras_export('keras.backend.temporal_padding') +def temporal_padding(x, padding=(1, 1)): + """Pads the middle dimension of a 3D tensor. + + Arguments: + x: Tensor or variable. + padding: Tuple of 2 integers, how many zeros to + add at the start and end of dim 1. + + Returns: + A padded 3D tensor. + """ + assert len(padding) == 2 + pattern = [[0, 0], [padding[0], padding[1]], [0, 0]] + return array_ops.pad(x, pattern) + + +@keras_export('keras.backend.spatial_2d_padding') +def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): + """Pads the 2nd and 3rd dimensions of a 4D tensor. + + Arguments: + x: Tensor or variable. + padding: Tuple of 2 tuples, padding pattern. + data_format: One of `channels_last` or `channels_first`. + + Returns: + A padded 4D tensor. + + Raises: + ValueError: if `data_format` is neither + `channels_last` or `channels_first`. + """ + assert len(padding) == 2 + assert len(padding[0]) == 2 + assert len(padding[1]) == 2 + if data_format is None: + data_format = image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError('Unknown data_format: ' + str(data_format)) + + if data_format == 'channels_first': + pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] + else: + pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] + return array_ops.pad(x, pattern) + + +@keras_export('keras.backend.spatial_3d_padding') +def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): + """Pads 5D tensor with zeros along the depth, height, width dimensions. + + Pads these dimensions with respectively + "padding[0]", "padding[1]" and "padding[2]" zeros left and right. + + For 'channels_last' data_format, + the 2nd, 3rd and 4th dimension will be padded. + For 'channels_first' data_format, + the 3rd, 4th and 5th dimension will be padded. + + Arguments: + x: Tensor or variable. + padding: Tuple of 3 tuples, padding pattern. + data_format: One of `channels_last` or `channels_first`. + + Returns: + A padded 5D tensor. 
+ + Raises: + ValueError: if `data_format` is neither + `channels_last` or `channels_first`. + + """ + assert len(padding) == 3 + assert len(padding[0]) == 2 + assert len(padding[1]) == 2 + assert len(padding[2]) == 2 + if data_format is None: + data_format = image_data_format() + if data_format not in {'channels_first', 'channels_last'}: + raise ValueError('Unknown data_format: ' + str(data_format)) + + if data_format == 'channels_first': + pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]], + [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]] + else: + pattern = [[0, 0], [padding[0][0], padding[0][1]], + [padding[1][0], padding[1][1]], [padding[2][0], + padding[2][1]], [0, 0]] + return array_ops.pad(x, pattern) + + +@keras_export('keras.backend.stack') +def stack(x, axis=0): + """Stacks a list of rank `R` tensors into a rank `R+1` tensor. + + Arguments: + x: List of tensors. + axis: Axis along which to perform stacking. + + Returns: + A tensor. + + Example: + ```python + >>> a = tf.constant([[1, 2],[3, 4]]) + >>> b = tf.constant([[10, 20],[30, 40]]) + >>> tf.keras.backend.stack((a, b)) + + ``` + """ + return array_ops.stack(x, axis=axis) + + +@keras_export('keras.backend.one_hot') +def one_hot(indices, num_classes): + """Computes the one-hot representation of an integer tensor. + + Arguments: + indices: nD integer tensor of shape + `(batch_size, dim1, dim2, ... dim(n-1))` + num_classes: Integer, number of classes to consider. + + Returns: + (n + 1)D one hot representation of the input + with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)` + + Returns: + The one-hot tensor. + """ + return array_ops.one_hot(indices, depth=num_classes, axis=-1) + + +@keras_export('keras.backend.reverse') +def reverse(x, axes): + """Reverse a tensor along the specified axes. + + Arguments: + x: Tensor to reverse. + axes: Integer or iterable of integers. + Axes to reverse. + + Returns: + A tensor. + """ + if isinstance(axes, int): + axes = [axes] + return array_ops.reverse(x, axes) + + +# VALUE MANIPULATION + + +@keras_export('keras.backend.get_value') +def get_value(x): + """Returns the value of a variable. + + Arguments: + x: input variable. + + Returns: + A Numpy array. + """ + if not tensor_util.is_tensor(x): + return x + if context.executing_eagerly() or isinstance(x, ops.EagerTensor): + return x.numpy() + if not getattr(x, '_in_graph_mode', True): + # This is a variable which was created in an eager context, but is being + # evaluated from a Graph. + with context.eager_mode(): + return x.numpy() + + if ops.executing_eagerly_outside_functions(): + # This method of evaluating works inside the Keras FuncGraph. + return function([], x)(x) + + with x.graph.as_default(): + return x.eval(session=get_session((x,))) + + +@keras_export('keras.backend.batch_get_value') +def batch_get_value(tensors): + """Returns the value of more than one tensor variable. + + Arguments: + tensors: list of ops to run. + + Returns: + A list of Numpy arrays. + + Raises: + RuntimeError: If this method is called inside defun. + """ + if context.executing_eagerly(): + return [x.numpy() for x in tensors] + elif ops.inside_function(): # pylint: disable=protected-access + raise RuntimeError('Cannot get value inside Tensorflow graph function.') + if tensors: + return get_session(tensors).run(tensors) + else: + return [] + + +@keras_export('keras.backend.set_value') +def set_value(x, value): + """Sets the value of a variable, from a Numpy array. + + Arguments: + x: Tensor to set to a new value. 
+ value: Value to set the tensor to, as a Numpy array + (of the same shape). + """ + value = np.asarray(value, dtype=dtype(x)) + if ops.executing_eagerly_outside_functions(): + with ops.init_scope(): + x.assign(value) + else: + with get_graph().as_default(): + tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0]) + if hasattr(x, '_assign_placeholder'): + assign_placeholder = x._assign_placeholder + assign_op = x._assign_op + else: + assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape) + assign_op = x.assign(assign_placeholder) + x._assign_placeholder = assign_placeholder + x._assign_op = assign_op + get_session().run(assign_op, feed_dict={assign_placeholder: value}) + + +@keras_export('keras.backend.batch_set_value') +def batch_set_value(tuples): + """Sets the values of many tensor variables at once. + + Arguments: + tuples: a list of tuples `(tensor, value)`. + `value` should be a Numpy array. + """ + if ops.executing_eagerly_outside_functions(): + with ops.init_scope(): + for x, value in tuples: + x.assign(np.asarray(value, dtype=dtype(x))) + else: + with get_graph().as_default(): + if tuples: + assign_ops = [] + feed_dict = {} + for x, value in tuples: + value = np.asarray(value, dtype=dtype(x)) + tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0]) + if hasattr(x, '_assign_placeholder'): + assign_placeholder = x._assign_placeholder + assign_op = x._assign_op + else: + assign_placeholder = array_ops.placeholder(tf_dtype, + shape=value.shape) + assign_op = x.assign(assign_placeholder) + x._assign_placeholder = assign_placeholder + x._assign_op = assign_op + assign_ops.append(assign_op) + feed_dict[assign_placeholder] = value + get_session().run(assign_ops, feed_dict=feed_dict) + + +@keras_export('keras.backend.print_tensor') +def print_tensor(x, message=''): + """Prints `message` and the tensor value when evaluated. + + Note that `print_tensor` returns a new tensor identical to `x` + which should be used in the following code. Otherwise the + print operation is not taken into account during evaluation. + + Example: + + ```python + >>> x = K.print_tensor(x, message="x is: ") + ``` + + Arguments: + x: Tensor to print. + message: Message to print jointly with the tensor. + + Returns: + The same tensor `x`, unchanged. + """ + if isinstance(x, ops.Tensor) and hasattr(x, 'graph'): + with get_graph().as_default(): + op = logging_ops.print_v2(message, x, output_stream=sys.stdout) + with ops.control_dependencies([op]): + return array_ops.identity(x) + else: + logging_ops.print_v2(message, x, output_stream=sys.stdout) + return x + +# GRAPH MANIPULATION + + +class GraphExecutionFunction(object): + """Runs a computation graph. + + It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`. + In particular additional operations via `fetches` argument and additional + tensor substitutions via `feed_dict` arguments. Note that given + substitutions are merged with substitutions from `inputs`. Even though + `feed_dict` is passed once in the constructor (called in `model.compile()`) + we can modify the values in the dictionary. Through this feed_dict we can + provide additional substitutions besides Keras inputs. + + Arguments: + inputs: Feed placeholders to the computation graph. + outputs: Output tensors to fetch. + updates: Additional update ops to be run at function call. + name: A name to help users identify what this function does. + session_kwargs: Arguments to `tf.Session.run()`: + `fetches`, `feed_dict`, `options`, `run_metadata`. 
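+  Example (editor's sketch; `K.function` constructs this class when graph
+  mode is active, e.g. after `tf.compat.v1.disable_eager_execution()`):
+
+  ```python
+      import numpy as np
+      x = K.placeholder(shape=(None,))
+      f = K.function(inputs=[x], outputs=[x * 2.0])
+      print(f([np.array([1.0, 2.0])]))  # [array([2., 4.], dtype=float32)]
+  ```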
+ """ + + def __init__(self, inputs, outputs, updates=None, name=None, + **session_kwargs): + updates = updates or [] + if not isinstance(updates, (list, tuple)): + raise TypeError('`updates` in a Keras backend function ' + 'should be a list or tuple.') + + self._inputs_structure = inputs + self.inputs = nest.flatten(inputs, expand_composites=True) + self._outputs_structure = outputs + self.outputs = cast_variables_to_tensor( + nest.flatten(outputs, expand_composites=True)) + # TODO(b/127668432): Consider using autograph to generate these + # dependencies in call. + # Index 0 = total loss or model output for `predict`. + with ops.control_dependencies([self.outputs[0]]): + updates_ops = [] + for update in updates: + if isinstance(update, tuple): + p, new_p = update + updates_ops.append(state_ops.assign(p, new_p)) + else: + # assumed already an op + updates_ops.append(update) + self.updates_op = control_flow_ops.group(*updates_ops) + self.name = name + # additional tensor substitutions + self.feed_dict = session_kwargs.pop('feed_dict', None) + # additional operations + self.fetches = session_kwargs.pop('fetches', []) + if not isinstance(self.fetches, list): + self.fetches = [self.fetches] + self.run_options = session_kwargs.pop('options', None) + self.run_metadata = session_kwargs.pop('run_metadata', None) + # The main use case of `fetches` being passed to a model is the ability + # to run custom updates + # This requires us to wrap fetches in `identity` ops. + self.fetches = [array_ops.identity(x) for x in self.fetches] + self.session_kwargs = session_kwargs + # This mapping keeps track of the function that should receive the + # output from a fetch in `fetches`: { fetch: function(fetch_output) } + # A Callback can use this to register a function with access to the + # output values for a fetch it added. + self.fetch_callbacks = {} + + if session_kwargs: + raise ValueError('Some keys in session_kwargs are not supported at this ' + 'time: %s' % (session_kwargs.keys(),)) + + self._callable_fn = None + self._feed_arrays = None + self._feed_symbols = None + self._symbol_vals = None + self._fetches = None + self._session = None + + def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session): + """Generates a callable that runs the graph. + + Arguments: + feed_arrays: List of input tensors to be fed Numpy arrays at runtime. + feed_symbols: List of input tensors to be fed symbolic tensors at runtime. + symbol_vals: List of symbolic tensors to be fed to `feed_symbols`. + session: Session to use to generate the callable. + + Returns: + Function that runs the graph according to the above options. + """ + # Prepare callable options. + callable_opts = config_pb2.CallableOptions() + # Handle external-data feed. + for x in feed_arrays: + callable_opts.feed.append(x.name) + if self.feed_dict: + for key in sorted(self.feed_dict.keys()): + callable_opts.feed.append(key.name) + # Handle symbolic feed. + for x, y in zip(feed_symbols, symbol_vals): + connection = callable_opts.tensor_connection.add() + if x.dtype != y.dtype: + y = math_ops.cast(y, dtype=x.dtype) + from_tensor = ops._as_graph_element(y) + if from_tensor is None: + from_tensor = y + connection.from_tensor = from_tensor.name # Data tensor + connection.to_tensor = x.name # Placeholder + # Handle fetches. + for x in self.outputs + self.fetches: + callable_opts.fetch.append(x.name) + # Handle updates. + callable_opts.target.append(self.updates_op.name) + # Handle run_options. 
+ if self.run_options: + callable_opts.run_options.CopyFrom(self.run_options) + # Create callable. + callable_fn = session._make_callable_from_options(callable_opts) + # Cache parameters corresponding to the generated callable, so that + # we can detect future mismatches and refresh the callable. + self._callable_fn = callable_fn + self._feed_arrays = feed_arrays + self._feed_symbols = feed_symbols + self._symbol_vals = symbol_vals + self._fetches = list(self.fetches) + self._session = session + + def _call_fetch_callbacks(self, fetches_output): + for fetch, output in zip(self._fetches, fetches_output): + if fetch in self.fetch_callbacks: + self.fetch_callbacks[fetch](output) + + def _eval_if_composite(self, tensor): + """Helper method which evaluates any CompositeTensors passed to it.""" + # We need to evaluate any composite tensor objects that have been + # reconstructed in 'pack_sequence_as', since otherwise they'll be output as + # actual CompositeTensor objects instead of the value(s) contained in the + # CompositeTensors. E.g., if output_structure contains a SparseTensor, then + # this ensures that we return its value as a SparseTensorValue rather than + # a SparseTensor. + if isinstance(tensor, composite_tensor.CompositeTensor): + return self._session.run(tensor) + else: + return tensor + + def __call__(self, inputs): + inputs = nest.flatten(inputs, expand_composites=True) + + session = get_session(inputs) + feed_arrays = [] + array_vals = [] + feed_symbols = [] + symbol_vals = [] + for tensor, value in zip(self.inputs, inputs): + if value is None: + continue + + if tensor_util.is_tensor(value): + # Case: feeding symbolic tensor. + feed_symbols.append(tensor) + symbol_vals.append(value) + else: + # Case: feeding Numpy array. + feed_arrays.append(tensor) + # We need to do array conversion and type casting at this level, since + # `callable_fn` only supports exact matches. + tensor_type = dtypes_module.as_dtype(tensor.dtype) + array_vals.append(np.asarray(value, + dtype=tensor_type.as_numpy_dtype)) + + if self.feed_dict: + for key in sorted(self.feed_dict.keys()): + array_vals.append( + np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name)) + + # Refresh callable if anything has changed. + if (self._callable_fn is None or feed_arrays != self._feed_arrays or + symbol_vals != self._symbol_vals or + feed_symbols != self._feed_symbols or self.fetches != self._fetches or + session != self._session): + self._make_callable(feed_arrays, feed_symbols, symbol_vals, session) + + fetched = self._callable_fn(*array_vals, + run_metadata=self.run_metadata) + self._call_fetch_callbacks(fetched[-len(self._fetches):]) + output_structure = nest.pack_sequence_as( + self._outputs_structure, + fetched[:len(self.outputs)], + expand_composites=True) + # We need to evaluate any composite tensor objects that have been + # reconstructed in 'pack_sequence_as', since otherwise they'll be output as + # actual CompositeTensor objects instead of the value(s) contained in the + # CompositeTensors. E.g., if output_structure contains a SparseTensor, then + # this ensures that we return its value as a SparseTensorValue rather than + # a SparseTensor. + return nest.map_structure(self._eval_if_composite, output_structure) + + +class EagerExecutionFunction(object): + """Helper class for constructing a TF graph function from the Keras graph. + + Arguments: + inputs: Feed placeholders to the computation graph. + outputs: Output tensors to fetch. + updates: Additional update ops to be run at function call. 
+ name: A name to help users identify what this function does. + session_kwargs: Unsupported. + """ + + def __init__(self, inputs, outputs, updates=None, name=None): + self.name = name + self._inputs_structure = inputs + inputs = nest.flatten(inputs, expand_composites=True) + self._outputs_structure = outputs + outputs = nest.flatten(outputs, expand_composites=True) + + updates = updates or [] + if not isinstance(updates, (list, tuple)): + raise TypeError('`updates` in a Keras backend function ' + 'should be a list or tuple.') + + if updates and not outputs: + # Edge case; never happens in practice + raise ValueError('Cannot create a Keras backend function with updates' + ' but no outputs during eager execution.') + graphs = { + i.graph + for i in nest.flatten([inputs, outputs, updates]) + if hasattr(i, 'graph') + } + if len(graphs) > 1: + raise ValueError('Cannot create an execution function which is comprised ' + 'of elements from multiple graphs.') + + source_graph = graphs.pop() + global_graph = get_graph() + + updates_ops = [] + legacy_update_ops = [] + for update in updates: + # For legacy reasons it is allowed to pass an update as a tuple + # `(variable, new_value)` (this maps to an assign op). Otherwise it + # is assumed to already be an op -- we cannot control its execution + # order. + if isinstance(update, tuple): + legacy_update_ops.append(update) + else: + if hasattr(update, 'op'): + update = update.op + if update is not None: + # `update.op` may have been None in certain cases. + updates_ops.append(update) + + self._freezable_vars_to_feed = [] + self._freezable_vars_values = [] + freezable_vars_from_keras_graph = object_identity.ObjectIdentitySet( + _FREEZABLE_VARS.get(global_graph, {})) + with _scratch_graph() as exec_graph: + global_graph = get_graph() + if source_graph not in (exec_graph, global_graph): + raise ValueError('Unknown graph. Aborting.') + + if source_graph is global_graph and exec_graph is not global_graph: + init_tensors = ( + outputs + updates_ops + [p for [p, _] in legacy_update_ops] + + [p_new for [_, p_new] in legacy_update_ops + if isinstance(p_new, ops.Tensor)]) + lifted_map = lift_to_graph.lift_to_graph( + tensors=init_tensors, + graph=exec_graph, + sources=inputs, + add_sources=True, + handle_captures=True, + base_graph=source_graph) + + inputs = [lifted_map[i] for i in inputs] + outputs = [lifted_map[i] for i in outputs] + updates_ops = [lifted_map[i] for i in updates_ops] + legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new)) + for p, p_new in legacy_update_ops] + + # Keep track of the value to feed to any "freezable variables" + # created in this graph. + for old_op, new_op in lifted_map.items(): + if old_op in freezable_vars_from_keras_graph: + frozen_var = old_op + if frozen_var._initial_value != frozen_var._current_value: + # We only feed a frozen_variable if its value has changed; + # otherwise it can rely on the default value of the + # underlying placeholder_with_default. 
+ self._freezable_vars_to_feed.append(new_op) + self._freezable_vars_values.append(frozen_var._current_value) + + # Consolidate updates + with exec_graph.as_default(): + outputs = cast_variables_to_tensor(outputs) + with ops.control_dependencies(outputs): + for p, p_new in legacy_update_ops: + updates_ops.append(state_ops.assign(p, p_new)) + + self.inputs, self.outputs = inputs, outputs + self._input_references = self.inputs + self._freezable_vars_to_feed + with ops.control_dependencies(updates_ops): + self.outputs[0] = array_ops.identity(self.outputs[0]) + + exec_graph.inputs = self._input_references + exec_graph.internal_captures + exec_graph.outputs = self.outputs + graph_fn = eager_function.ConcreteFunction(exec_graph) + + graph_fn._num_positional_args = len(self._input_references) + graph_fn._arg_keywords = [] + self._graph_fn = graph_fn + + # Handle placeholders with default + # (treated as required placeholder by graph functions) + self._placeholder_default_values = {} + with exec_graph.as_default(): + for x in self.inputs: + if x.op.type == 'PlaceholderWithDefault': + self._placeholder_default_values[ops.tensor_id( + x)] = tensor_util.constant_value(x.op.inputs[0]) + + def __call__(self, inputs): + input_values = nest.flatten(inputs, expand_composites=True) + + if self._freezable_vars_values: + input_values = input_values + self._freezable_vars_values + converted_inputs = [] + for tensor, value in zip(self._input_references, input_values): + if value is None: + # Assume `value` is a placeholder with default + value = self._placeholder_default_values.get( + ops.tensor_id(tensor), None) + if value is None: + raise ValueError( + 'You must feed a value for placeholder %s' % (tensor,)) + if not isinstance(value, ops.Tensor): + value = ops.convert_to_tensor(value, dtype=tensor.dtype) + if value.dtype != tensor.dtype: + # Temporary workaround due to `convert_to_tensor` not casting floats. + # See b/119637405 + value = math_ops.cast(value, tensor.dtype) + converted_inputs.append(value) + outputs = self._graph_fn(*converted_inputs) + + # EagerTensor.numpy() will often make a copy to ensure memory safety. + # However in this case `outputs` is not directly returned, so it is always + # safe to reuse the underlying buffer without checking. In such a case the + # private numpy conversion method is preferred to guarantee performance. + return nest.pack_sequence_as( + self._outputs_structure, + [x._numpy() for x in outputs], # pylint: disable=protected-access + expand_composites=True) + + +@keras_export('keras.backend.function') +def function(inputs, outputs, updates=None, name=None, **kwargs): + """Instantiates a Keras function. + + Arguments: + inputs: List of placeholder tensors. + outputs: List of output tensors. + updates: List of update ops. + name: String, name of function. + **kwargs: Passed to `tf.Session.run`. + + Returns: + Output values as Numpy arrays. + + Raises: + ValueError: if invalid kwargs are passed in or if in eager execution. + """ + if ops.executing_eagerly_outside_functions(): + if kwargs: + raise ValueError('Session keyword arguments are not support during ' + 'eager execution. 
You passed: %s' % (kwargs,)) + return EagerExecutionFunction(inputs, outputs, updates=updates, name=name) + + if kwargs: + for key in kwargs: + if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0] + and key not in ['inputs', 'outputs', 'updates', 'name']): + msg = ('Invalid argument "%s" passed to K.function with TensorFlow ' + 'backend') % key + raise ValueError(msg) + return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs) + + +@keras_export('keras.backend.gradients') +def gradients(loss, variables): + """Returns the gradients of `loss` w.r.t. `variables`. + + Arguments: + loss: Scalar tensor to minimize. + variables: List of variables. + + Returns: + A gradients tensor. + """ + return gradients_module.gradients( + loss, variables, colocate_gradients_with_ops=True) + + +@keras_export('keras.backend.stop_gradient') +def stop_gradient(variables): + """Returns `variables` but with zero gradient w.r.t. every other variable. + + Arguments: + variables: Tensor or list of tensors to consider constant with respect + to any other variable. + + + Returns: + A single tensor or a list of tensors (depending on the passed argument) + that has no gradient with respect to any other variable. + """ + if isinstance(variables, (list, tuple)): + return map(array_ops.stop_gradient, variables) + return array_ops.stop_gradient(variables) + + +# CONTROL FLOW + + +@keras_export('keras.backend.rnn') +def rnn(step_function, + inputs, + initial_states, + go_backwards=False, + mask=None, + constants=None, + unroll=False, + input_length=None, + time_major=False, + zero_output_for_mask=False): + """Iterates over the time dimension of a tensor. + + Arguments: + step_function: RNN step function. + Args; + input; Tensor with shape `(samples, ...)` (no time dimension), + representing input for the batch of samples at a certain + time step. + states; List of tensors. + Returns; + output; Tensor with shape `(samples, output_dim)` + (no time dimension). + new_states; List of tensors, same length and shapes + as 'states'. The first state in the list must be the + output tensor at the previous timestep. + inputs: Tensor of temporal data of shape `(samples, time, ...)` + (at least 3D), or nested tensors, and each of which has shape + `(samples, time, ...)`. + initial_states: Tensor with shape `(samples, state_size)` + (no time dimension), containing the initial values for the states used + in the step function. In the case that state_size is in a nested + shape, the shape of initial_states will also follow the nested + structure. + go_backwards: Boolean. If True, do the iteration over the time + dimension in reverse order and return the reversed sequence. + mask: Binary tensor with shape `(samples, time, 1)`, + with a zero for every element that is masked. + constants: List of constant values passed at each step. + unroll: Whether to unroll the RNN or to use a symbolic `while_loop`. + input_length: If specified, assume time dimension is of this length. + time_major: Boolean. If true, the inputs and outputs will be in shape + `(timesteps, batch, ...)`, whereas in the False case, it will be + `(batch, timesteps, ...)`. Using `time_major = True` is a bit more + efficient because it avoids transposes at the beginning and end of the + RNN calculation. However, most TensorFlow data is batch-major, so by + default this function accepts input and emits output in batch-major + form. + zero_output_for_mask: Boolean. 
+  Returns:
+      A tuple, `(last_output, outputs, new_states)`.
+          last_output: the latest output of the rnn, of shape `(samples, ...)`
+          outputs: tensor with shape `(samples, time, ...)` where each
+              entry `outputs[s, t]` is the output of the step function
+              at time `t` for sample `s`.
+          new_states: list of tensors, latest states returned by
+              the step function, of shape `(samples, ...)`.
+
+  Raises:
+      ValueError: if input dimension is less than 3.
+      ValueError: if `unroll` is `True` but input timestep is not a fixed
+          number.
+      ValueError: if `mask` is provided (not `None`) but states is not provided
+          (`len(states)` == 0).
+  """
+
+  def swap_batch_timestep(input_t):
+    # Swap the batch and timestep dim for the incoming tensor.
+    axes = list(range(len(input_t.shape)))
+    axes[0], axes[1] = 1, 0
+    return array_ops.transpose(input_t, axes)
+
+  if not time_major:
+    inputs = nest.map_structure(swap_batch_timestep, inputs)
+
+  flatted_inputs = nest.flatten(inputs)
+  time_steps = flatted_inputs[0].shape[0]
+  batch = flatted_inputs[0].shape[1]
+  time_steps_t = array_ops.shape(flatted_inputs[0])[0]
+
+  for input_ in flatted_inputs:
+    input_.shape.with_rank_at_least(3)
+
+  if mask is not None:
+    if mask.dtype != dtypes_module.bool:
+      mask = math_ops.cast(mask, dtypes_module.bool)
+    if len(mask.shape) == 2:
+      mask = expand_dims(mask)
+    if not time_major:
+      mask = swap_batch_timestep(mask)
+
+  if constants is None:
+    constants = []
+
+  # tf.where needs its condition tensor to be the same shape as its two
+  # result tensors, but in our case the condition (mask) tensor is
+  # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
+  # So we need to broadcast the mask to match the shape of inputs.
+  # That's what the tile call does; it just repeats the mask along its
+  # second dimension n times.
+  def _expand_mask(mask_t, input_t, fixed_dim=1):
+    assert not nest.is_sequence(mask_t)
+    assert not nest.is_sequence(input_t)
+    rank_diff = len(input_t.shape) - len(mask_t.shape)
+    for _ in range(rank_diff):
+      mask_t = array_ops.expand_dims(mask_t, -1)
+    multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
+    return array_ops.tile(mask_t, multiples)
+
+  if unroll:
+    if not time_steps:
+      raise ValueError('Unrolling requires a fixed number of timesteps.')
+    states = tuple(initial_states)
+    successive_states = []
+    successive_outputs = []
+
+    # Process the input tensors. They need to be split on the time_step dim
+    # and reversed if go_backwards is True. In the case of nested
+    # input, the input is flattened and then transformed individually.
+    # The result of this will be a tuple of lists; each item in the tuple is
+    # a list of tensors with shape (batch, feature).
+    def _process_single_input_t(input_t):
+      input_t = array_ops.unstack(input_t)  # unstack for time_step dim
+      if go_backwards:
+        input_t.reverse()
+      return input_t
+
+    if nest.is_sequence(inputs):
+      processed_input = nest.map_structure(_process_single_input_t, inputs)
+    else:
+      processed_input = (_process_single_input_t(inputs),)
+
+    def _get_input_tensor(time):
+      inp = [t_[time] for t_ in processed_input]
+      return nest.pack_sequence_as(inputs, inp)
+
+    if mask is not None:
+      mask_list = array_ops.unstack(mask)
+      if go_backwards:
+        mask_list.reverse()
+
+      for i in range(time_steps):
+        inp = _get_input_tensor(i)
+        mask_t = mask_list[i]
+        output, new_states = step_function(inp,
+                                           tuple(states) + tuple(constants))
+        tiled_mask_t = _expand_mask(mask_t, output)
+
+        if not successive_outputs:
+          prev_output = zeros_like(output)
+        else:
+          prev_output = successive_outputs[-1]
+
+        output = array_ops.where(tiled_mask_t, output, prev_output)
+
+        return_states = []
+        for state, new_state in zip(states, new_states):
+          # (see earlier comment for tile explanation)
+          tiled_mask_t = _expand_mask(mask_t, new_state)
+          return_states.append(array_ops.where(tiled_mask_t, new_state, state))
+        states = return_states
+        successive_outputs.append(output)
+        successive_states.append(states)
+      last_output = successive_outputs[-1]
+      new_states = successive_states[-1]
+      outputs = array_ops.stack(successive_outputs)
+
+      if zero_output_for_mask:
+        last_output = array_ops.where(
+            _expand_mask(mask_list[-1], last_output),
+            last_output,
+            zeros_like(last_output))
+        outputs = array_ops.where(
+            _expand_mask(mask, outputs, fixed_dim=2),
+            outputs,
+            zeros_like(outputs))
+
+    else:
+      for i in range(time_steps):
+        inp = _get_input_tensor(i)
+        output, states = step_function(inp, tuple(states) + tuple(constants))
+        successive_outputs.append(output)
+        successive_states.append(states)
+      last_output = successive_outputs[-1]
+      new_states = successive_states[-1]
+      outputs = array_ops.stack(successive_outputs)
+
+  else:
+    states = tuple(initial_states)
+
+    # Create the input TensorArrays. If the input is nested, it is flattened
+    # first and one TensorArray is created per flattened tensor.
+    input_ta = tuple(
+        tensor_array_ops.TensorArray(
+            dtype=inp.dtype,
+            size=time_steps_t,
+            tensor_array_name='input_ta_%s' % i)
+        for i, inp in enumerate(flatted_inputs))
+    input_ta = tuple(
+        ta.unstack(input_) if not go_backwards else ta
+        .unstack(reverse(input_, 0))
+        for ta, input_ in zip(input_ta, flatted_inputs))
+
+    # Get the time(0) input and compute the output for that; the output will
+    # be used to determine the dtype of the output TensorArray. Don't read
+    # from input_ta because TensorArray's clear_after_read defaults to True.
+    input_time_zero = nest.pack_sequence_as(inputs,
+                                            [inp[0] for inp in flatted_inputs])
+    # output_time_zero is used to determine the cell output shape and its
+    # dtype. The value is discarded.
+    output_time_zero, _ = step_function(
+        input_time_zero, tuple(initial_states) + tuple(constants))
+    output_ta = tuple(
+        tensor_array_ops.TensorArray(
+            dtype=out.dtype,
+            size=time_steps_t,
+            element_shape=out.shape,
+            tensor_array_name='output_ta_%s' % i)
+        for i, out in enumerate(nest.flatten(output_time_zero)))
+
+    time = constant_op.constant(0, dtype='int32', name='time')
+
+    while_loop_kwargs = {
+        'cond': lambda time, *_: time < time_steps_t,
+        'maximum_iterations': input_length,
+        'parallel_iterations': 32,
+        'swap_memory': True,
+    }
+
+    if mask is not None:
+      if not states:
+        raise ValueError('No initial states provided! '
+                         'When using masking in an RNN, you should '
+                         'provide initial states '
+                         '(and your step function should return '
+                         'as its first state at time `t` '
+                         'the output at time `t-1`).')
+      if go_backwards:
+        mask = reverse(mask, 0)
+
+      mask_ta = tensor_array_ops.TensorArray(
+          dtype=dtypes_module.bool,
+          size=time_steps_t,
+          tensor_array_name='mask_ta')
+      mask_ta = mask_ta.unstack(mask)
+
+      # The mask for the output at time T is based on the output at time
+      # T - 1. In the case T = 0, a zero-filled tensor is used instead.
+      flat_zero_output = tuple(array_ops.zeros_like(o)
+                               for o in nest.flatten(output_time_zero))
+      def _step(time, output_ta_t, prev_output, *states):
+        """RNN step function.
+
+        Arguments:
+            time: Current timestep value.
+            output_ta_t: TensorArray.
+            prev_output: tuple of outputs from time - 1.
+            *states: List of states.
+
+        Returns:
+            Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
+        """
+        current_input = tuple(ta.read(time) for ta in input_ta)
+        # Maybe set shape.
+        current_input = nest.pack_sequence_as(inputs, current_input)
+        mask_t = mask_ta.read(time)
+        output, new_states = step_function(current_input,
+                                           tuple(states) + tuple(constants))
+        # mask output
+        flat_output = nest.flatten(output)
+        flat_mask_output = (flat_zero_output if zero_output_for_mask
+                            else nest.flatten(prev_output))
+        tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
+        flat_new_output = tuple(
+            array_ops.where(m, o, zo) for m, o, zo in zip(
+                tiled_mask_t, flat_output, flat_mask_output))
+
+        # mask states
+        flat_state = nest.flatten(states)
+        flat_new_state = nest.flatten(new_states)
+        for state, new_state in zip(flat_state, flat_new_state):
+          if isinstance(new_state, ops.Tensor):
+            new_state.set_shape(state.shape)
+        tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
+        flat_final_state = tuple(
+            array_ops.where(m, s, ps)
+            for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
+        new_states = nest.pack_sequence_as(new_states, flat_final_state)
+
+        output_ta_t = tuple(
+            ta.write(time, out)
+            for ta, out in zip(output_ta_t, flat_new_output))
+        return (time + 1, output_ta_t,
+                tuple(flat_new_output)) + tuple(new_states)
+
+      final_outputs = control_flow_ops.while_loop(
+          body=_step,
+          loop_vars=(time, output_ta, flat_zero_output) + states,
+          **while_loop_kwargs)
+      # Skip final_outputs[2] which is the output for the final timestep.
+      new_states = final_outputs[3:]
+    else:
+      def _step(time, output_ta_t, *states):
+        """RNN step function.
+
+        Arguments:
+            time: Current timestep value.
+            output_ta_t: TensorArray.
+            *states: List of states.
+ + Returns: + Tuple: `(time + 1,output_ta_t) + tuple(new_states)` + """ + current_input = tuple(ta.read(time) for ta in input_ta) + current_input = nest.pack_sequence_as(inputs, current_input) + output, new_states = step_function(current_input, + tuple(states) + tuple(constants)) + flat_state = nest.flatten(states) + flat_new_state = nest.flatten(new_states) + for state, new_state in zip(flat_state, flat_new_state): + if isinstance(new_state, ops.Tensor): + new_state.set_shape(state.shape) + + flat_output = nest.flatten(output) + output_ta_t = tuple( + ta.write(time, out) for ta, out in zip(output_ta_t, flat_output)) + new_states = nest.pack_sequence_as(initial_states, flat_new_state) + return (time + 1, output_ta_t) + tuple(new_states) + + final_outputs = control_flow_ops.while_loop( + body=_step, + loop_vars=(time, output_ta) + states, + **while_loop_kwargs) + new_states = final_outputs[2:] + + output_ta = final_outputs[1] + + outputs = tuple(o.stack() for o in output_ta) + last_output = tuple(o[-1] for o in outputs) + + outputs = nest.pack_sequence_as(output_time_zero, outputs) + last_output = nest.pack_sequence_as(output_time_zero, last_output) + + # static shape inference + def set_shape(output_): + if isinstance(output_, ops.Tensor): + shape = output_.shape.as_list() + shape[0] = time_steps + shape[1] = batch + output_.set_shape(shape) + return output_ + + outputs = nest.map_structure(set_shape, outputs) + + if not time_major: + outputs = nest.map_structure(swap_batch_timestep, outputs) + + return last_output, outputs, new_states + + +@keras_export('keras.backend.switch') +def switch(condition, then_expression, else_expression): + """Switches between two operations depending on a scalar value. + + Note that both `then_expression` and `else_expression` + should be symbolic tensors of the *same shape*. + + Arguments: + condition: tensor (`int` or `bool`). + then_expression: either a tensor, or a callable that returns a tensor. + else_expression: either a tensor, or a callable that returns a tensor. + + Returns: + The selected tensor. + + Raises: + ValueError: If rank of `condition` is greater than rank of expressions. + """ + if condition.dtype != dtypes_module.bool: + condition = math_ops.cast(condition, 'bool') + cond_ndim = ndim(condition) + if not cond_ndim: + if not callable(then_expression): + + def then_expression_fn(): + return then_expression + else: + then_expression_fn = then_expression + if not callable(else_expression): + + def else_expression_fn(): + return else_expression + else: + else_expression_fn = else_expression + x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn) + else: + # tf.where needs its condition tensor + # to be the same shape as its two + # result tensors + if callable(then_expression): + then_expression = then_expression() + if callable(else_expression): + else_expression = else_expression() + expr_ndim = ndim(then_expression) + if cond_ndim > expr_ndim: + raise ValueError('Rank of `condition` should be less than or' + ' equal to rank of `then_expression` and ' + '`else_expression`. 
ndim(condition)=' + str(cond_ndim) +
+                       ', ndim(then_expression)'
+                       '=' + str(expr_ndim))
+    if cond_ndim > 1:
+      ndim_diff = expr_ndim - cond_ndim
+      cond_shape = array_ops.concat(
+          [array_ops.shape(condition), [1] * ndim_diff], axis=0)
+      condition = array_ops.reshape(condition, cond_shape)
+      expr_shape = array_ops.shape(then_expression)
+      shape_diff = expr_shape - cond_shape
+      tile_shape = array_ops.where(shape_diff > 0, expr_shape,
+                                   array_ops.ones_like(expr_shape))
+      condition = array_ops.tile(condition, tile_shape)
+    x = array_ops.where(condition, then_expression, else_expression)
+  return x
+
+
+@keras_export('keras.backend.in_train_phase')
+def in_train_phase(x, alt, training=None):
+  """Selects `x` in train phase, and `alt` otherwise.
+
+  Note that `alt` should have the *same shape* as `x`.
+
+  Arguments:
+      x: What to return in train phase
+          (tensor or callable that returns a tensor).
+      alt: What to return otherwise
+          (tensor or callable that returns a tensor).
+      training: Optional scalar tensor
+          (or Python boolean, or Python integer)
+          specifying the learning phase.
+
+  Returns:
+      Either `x` or `alt` based on the `training` flag.
+      The `training` flag defaults to `K.learning_phase()`.
+  """
+  if training is None:
+    training = learning_phase()
+
+  # TODO(b/138862903): Handle the case when training is tensor.
+  if not tensor_util.is_tensor(training):
+    if training == 1 or training is True:
+      if callable(x):
+        return x()
+      else:
+        return x
+
+    elif training == 0 or training is False:
+      if callable(alt):
+        return alt()
+      else:
+        return alt
+
+  # else: assume learning phase is a placeholder tensor.
+  x = switch(training, x, alt)
+  return x
+
+
+@keras_export('keras.backend.in_test_phase')
+def in_test_phase(x, alt, training=None):
+  """Selects `x` in test phase, and `alt` otherwise.
+
+  Note that `alt` should have the *same shape* as `x`.
+
+  Arguments:
+      x: What to return in test phase
+          (tensor or callable that returns a tensor).
+      alt: What to return otherwise
+          (tensor or callable that returns a tensor).
+      training: Optional scalar tensor
+          (or Python boolean, or Python integer)
+          specifying the learning phase.
+
+  Returns:
+      Either `x` or `alt` based on `K.learning_phase`.
+  """
+  return in_train_phase(alt, x, training=training)
+
+
+# NN OPERATIONS
+
+
+@keras_export('keras.backend.relu')
+def relu(x, alpha=0., max_value=None, threshold=0):
+  """Rectified linear unit.
+
+  With default values, it returns element-wise `max(x, 0)`.
+
+  Otherwise, it follows:
+  `f(x) = max_value` for `x >= max_value`,
+  `f(x) = x` for `threshold <= x < max_value`,
+  `f(x) = alpha * (x - threshold)` otherwise.
+
+  Arguments:
+      x: A tensor or variable.
+      alpha: A scalar, slope of negative section (default=`0.`).
+      max_value: float. Saturation threshold.
+      threshold: float. Threshold value for thresholded activation.
+
+  Returns:
+      A tensor.
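+
+  Example (an illustrative sketch; the values shown follow from the rules
+  above and are not part of the original docstring):
+
+  ```python
+      from tensorflow.keras import backend as K
+      x = K.constant([-2., -1., 0., 5., 10.])
+      print(K.relu(x))  # [0., 0., 0., 5., 10.]
+      # Leaky slope of 0.5 below the threshold, saturated at 6:
+      print(K.relu(x, alpha=0.5, max_value=6.))  # [-1., -0.5, 0., 5., 6.]
+  ```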
+  """
+
+  if alpha != 0.:
+    if max_value is None and threshold == 0:
+      return nn.leaky_relu(x, alpha=alpha)
+
+    if threshold != 0:
+      negative_part = nn.relu(-x + threshold)
+    else:
+      negative_part = nn.relu(-x)
+
+  clip_max = max_value is not None
+
+  if threshold != 0:
+    # computes x for x > threshold else 0
+    x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
+  elif max_value == 6:
+    # if no threshold, then can use nn.relu6 native TF op for performance
+    x = nn.relu6(x)
+    clip_max = False
+  else:
+    x = nn.relu(x)
+
+  if clip_max:
+    max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
+    zero = _constant_to_tensor(0., x.dtype.base_dtype)
+    x = clip_ops.clip_by_value(x, zero, max_value)
+
+  if alpha != 0.:
+    alpha = _to_tensor(alpha, x.dtype.base_dtype)
+    x -= alpha * negative_part
+  return x
+
+
+@keras_export('keras.backend.elu')
+def elu(x, alpha=1.):
+  """Exponential linear unit.
+
+  Arguments:
+      x: A tensor or variable to compute the activation function for.
+      alpha: A scalar, slope of negative section.
+
+  Returns:
+      A tensor.
+  """
+  res = nn.elu(x)
+  if alpha == 1:
+    return res
+  else:
+    return array_ops.where(x > 0, res, alpha * res)
+
+
+@keras_export('keras.backend.softmax')
+def softmax(x, axis=-1):
+  """Softmax of a tensor.
+
+  Arguments:
+      x: A tensor or variable.
+      axis: The dimension softmax would be performed on.
+          The default is -1 which indicates the last dimension.
+
+  Returns:
+      A tensor.
+  """
+  return nn.softmax(x, axis=axis)
+
+
+@keras_export('keras.backend.softplus')
+def softplus(x):
+  """Softplus of a tensor.
+
+  Arguments:
+      x: A tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  return nn.softplus(x)
+
+
+@keras_export('keras.backend.softsign')
+def softsign(x):
+  """Softsign of a tensor.
+
+  Arguments:
+      x: A tensor or variable.
+
+  Returns:
+      A tensor.
+  """
+  return nn.softsign(x)
+
+
+@keras_export('keras.backend.categorical_crossentropy')
+def categorical_crossentropy(target, output, from_logits=False, axis=-1):
+  """Categorical crossentropy between an output tensor and a target tensor.
+
+  Arguments:
+      target: A tensor of the same shape as `output`.
+      output: A tensor resulting from a softmax
+          (unless `from_logits` is True, in which
+          case `output` is expected to be the logits).
+      from_logits: Boolean, whether `output` is the
+          result of a softmax, or is a tensor of logits.
+      axis: Int specifying the channels axis. `axis=-1` corresponds to data
+          format `channels_last`, and `axis=1` corresponds to data format
+          `channels_first`.
+
+  Returns:
+      Output tensor.
+
+  Raises:
+      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
+
+  Example:
+  ```python
+      import tensorflow as tf
+      from tensorflow.keras import backend as K
+      a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3, 3])
+      print("a: ", a)
+      b = tf.constant([.9, .05, .05, .5, .89, .6, .05, .01, .94], shape=[3, 3])
+      print("b: ", b)
+      loss = K.categorical_crossentropy(a, b)
+      print('Loss: ', loss)  # Loss: tf.Tensor([0.10536055 0.8046684 0.06187541], shape=(3,), dtype=float32)
+      loss = K.categorical_crossentropy(a, a)
+      print('Loss: ', loss)  # Loss: tf.Tensor([1.1920929e-07 1.1920929e-07 1.1920929e-07], shape=(3,), dtype=float32)
+  ```
+  """
+  if not from_logits:
+    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
+        output.op.type != 'Softmax'):
+      # scale preds so that the class probas of each sample sum to 1
+      output = output / math_ops.reduce_sum(output, axis, True)
+      # Compute cross entropy from probabilities.
+      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
+      output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
+      return -math_ops.reduce_sum(target * math_ops.log(output), axis)
+    else:
+      # When the softmax activation function is used for the output operation,
+      # we use the logits from the softmax function directly to compute the
+      # loss, in order to prevent the loss from collapsing to zero during
+      # training. See b/117284466.
+      assert len(output.op.inputs) == 1
+      output = output.op.inputs[0]
+  return nn.softmax_cross_entropy_with_logits_v2(
+      labels=target, logits=output, axis=axis)
+
+
+@keras_export('keras.backend.sparse_categorical_crossentropy')
+def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
+  """Categorical crossentropy with integer targets.
+
+  Arguments:
+      target: An integer tensor.
+      output: A tensor resulting from a softmax
+          (unless `from_logits` is True, in which
+          case `output` is expected to be the logits).
+      from_logits: Boolean, whether `output` is the
+          result of a softmax, or is a tensor of logits.
+      axis: Int specifying the channels axis. `axis=-1` corresponds to data
+          format `channels_last`, and `axis=1` corresponds to data format
+          `channels_first`.
+
+  Returns:
+      Output tensor.
+
+  Raises:
+      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
+  """
+  if not from_logits:
+    if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
+        output.op.type != 'Softmax'):
+      epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
+      output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
+      output = math_ops.log(output)
+    else:
+      # When the softmax activation function is used for the output operation,
+      # we use the logits from the softmax function directly to compute the
+      # loss, in order to prevent the loss from collapsing to zero during
+      # training. See b/117284466.
+      assert len(output.op.inputs) == 1
+      output = output.op.inputs[0]
+
+  if isinstance(output.shape, (tuple, list)):
+    output_rank = len(output.shape)
+  else:
+    output_rank = output.shape.ndims
+  if output_rank is not None:
+    axis %= output_rank
+    if axis != output_rank - 1:
+      permutation = list(
+          itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
+      output = array_ops.transpose(output, perm=permutation)
+  elif axis != -1:
+    raise ValueError(
+        'Cannot compute sparse categorical crossentropy with `axis={}` on an '
+        'output tensor with unknown rank'.format(axis))
+
+  # Note: labels are cast to int32 here; the upstream implementation casts
+  # them to int64 instead.
+  target = cast(target, 'int32')
+
+  # Try to adjust the shape so that rank of labels = rank of logits - 1.
+  output_shape = array_ops.shape_v2(output)
+  target_rank = target.shape.ndims
+
+  update_shape = (
+      target_rank is not None and output_rank is not None and
+      target_rank != output_rank - 1)
+  if update_shape:
+    target = flatten(target)
+    output = array_ops.reshape(output, [-1, output_shape[-1]])
+
+  if py_any([_is_symbolic_tensor(v) for v in [target, output]]):
+    with get_graph().as_default():
+      res = nn.sparse_softmax_cross_entropy_with_logits_v2(
+          labels=target, logits=output)
+  else:
+    res = nn.sparse_softmax_cross_entropy_with_logits_v2(
+        labels=target, logits=output)
+
+  if update_shape and output_rank >= 3:
+    # If our output includes timesteps or spatial dimensions we need to reshape
+    return array_ops.reshape(res, output_shape[:-1])
+  else:
+    return res
+
+
+@keras_export('keras.backend.binary_crossentropy')
+def binary_crossentropy(target, output, from_logits=False):
+  """Binary crossentropy between an output tensor and a target tensor.
+ + Arguments: + target: A tensor with the same shape as `output`. + output: A tensor. + from_logits: Whether `output` is expected to be a logits tensor. + By default, we consider that `output` + encodes a probability distribution. + + Returns: + A tensor. + """ + if not from_logits: + if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or + output.op.type != 'Sigmoid'): + epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype) + output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_) + + # Compute cross entropy from probabilities. + bce = target * math_ops.log(output + epsilon()) + bce += (1 - target) * math_ops.log(1 - output + epsilon()) + return -bce + else: + # When sigmoid activation function is used for output operation, we + # use logits from the sigmoid function directly to compute loss in order + # to prevent collapsing zero when training. + assert len(output.op.inputs) == 1 + output = output.op.inputs[0] + return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output) + + +@keras_export('keras.backend.sigmoid') +def sigmoid(x): + """Element-wise sigmoid. + + Arguments: + x: A tensor or variable. + + Returns: + A tensor. + """ + return nn.sigmoid(x) + + +@keras_export('keras.backend.hard_sigmoid') +def hard_sigmoid(x): + """Segment-wise linear approximation of sigmoid. + + Faster than sigmoid. + Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`. + In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`. + + Arguments: + x: A tensor or variable. + + Returns: + A tensor. + """ + point_two = _constant_to_tensor(0.2, x.dtype.base_dtype) + point_five = _constant_to_tensor(0.5, x.dtype.base_dtype) + x = math_ops.mul(x, point_two) + x = math_ops.add(x, point_five) + x = clip_ops.clip_by_value(x, 0., 1.) + return x + + +@keras_export('keras.backend.tanh') +def tanh(x): + """Element-wise tanh. + + Arguments: + x: A tensor or variable. + + Returns: + A tensor. + """ + return nn.tanh(x) + + +@keras_export('keras.backend.dropout') +def dropout(x, level, noise_shape=None, seed=None): + """Sets entries in `x` to zero at random, while scaling the entire tensor. + + Arguments: + x: tensor + level: fraction of the entries in the tensor + that will be set to 0. + noise_shape: shape for randomly generated keep/drop flags, + must be broadcastable to the shape of `x` + seed: random seed to ensure determinism. + + Returns: + A tensor. + """ + if seed is None: + seed = np.random.randint(10e6) + return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed) + + +@keras_export('keras.backend.l2_normalize') +def l2_normalize(x, axis=None): + """Normalizes a tensor wrt the L2 norm alongside the specified axis. + + Arguments: + x: Tensor or variable. + axis: axis along which to perform normalization. + + Returns: + A tensor. + """ + return nn.l2_normalize(x, axis=axis) + + +@keras_export('keras.backend.in_top_k') +def in_top_k(predictions, targets, k): + """Returns whether the `targets` are in the top `k` `predictions`. + + Arguments: + predictions: A tensor of shape `(batch_size, classes)` and type `float32`. + targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. + k: An `int`, number of top elements to consider. + + Returns: + A 1D tensor of length `batch_size` and type `bool`. + `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` + values of `predictions[i]`. 
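+
+  Example (an illustrative sketch; values are chosen for demonstration):
+
+  ```python
+      from tensorflow.keras import backend as K
+      preds = K.constant([[0.1, 0.6, 0.3],
+                          [0.7, 0.2, 0.1]])
+      targets = K.constant([2, 2], dtype='int32')
+      print(K.in_top_k(preds, targets, k=2))  # [ True False]
+  ```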
+  """
+  return nn.in_top_k(predictions, targets, k)
+
+
+# CONVOLUTIONS
+
+
+def _preprocess_conv1d_input(x, data_format):
+  """Transpose and cast the input before the conv1d.
+
+  Arguments:
+      x: input tensor.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+
+  Returns:
+      A tensor.
+  """
+  tf_data_format = 'NWC'  # to pass TF Conv2dNative operations
+  if data_format == 'channels_first':
+    if not _has_nchw_support():
+      x = array_ops.transpose(x, (0, 2, 1))  # NCW -> NWC
+    else:
+      tf_data_format = 'NCW'
+  return x, tf_data_format
+
+
+def _preprocess_conv2d_input(x, data_format, force_transpose=False):
+  """Transpose and cast the input before the conv2d.
+
+  Arguments:
+      x: input tensor.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      force_transpose: Boolean. If True, the input will always be transposed
+          from NCHW to NHWC if `data_format` is `"channels_first"`.
+          If False, the transposition only occurs on CPU (GPU ops are
+          assumed to support NCHW).
+
+  Returns:
+      A tensor.
+  """
+  tf_data_format = 'NHWC'
+  if data_format == 'channels_first':
+    if not _has_nchw_support() or force_transpose:
+      x = array_ops.transpose(x, (0, 2, 3, 1))  # NCHW -> NHWC
+    else:
+      tf_data_format = 'NCHW'
+  return x, tf_data_format
+
+
+def _preprocess_conv3d_input(x, data_format):
+  """Transpose and cast the input before the conv3d.
+
+  Arguments:
+      x: input tensor.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+
+  Returns:
+      A tensor.
+  """
+  tf_data_format = 'NDHWC'
+  if data_format == 'channels_first':
+    if not _has_nchw_support():
+      x = array_ops.transpose(x, (0, 2, 3, 4, 1))
+    else:
+      tf_data_format = 'NCDHW'
+  return x, tf_data_format
+
+
+def _preprocess_padding(padding):
+  """Convert keras' padding to TensorFlow's padding.
+
+  Arguments:
+      padding: string, one of 'same', 'valid'.
+
+  Returns:
+      a string, one of 'SAME', 'VALID'.
+
+  Raises:
+      ValueError: if `padding` is invalid.
+  """
+  if padding == 'same':
+    padding = 'SAME'
+  elif padding == 'valid':
+    padding = 'VALID'
+  else:
+    raise ValueError('Invalid padding: ' + str(padding))
+  return padding
+
+
+@keras_export('keras.backend.conv1d')
+def conv1d(x,
+           kernel,
+           strides=1,
+           padding='valid',
+           data_format=None,
+           dilation_rate=1):
+  """1D convolution.
+
+  Arguments:
+      x: Tensor or variable.
+      kernel: kernel tensor.
+      strides: stride integer.
+      padding: string, `"same"`, `"causal"` or `"valid"`.
+      data_format: string, one of "channels_last", "channels_first".
+      dilation_rate: integer dilate rate.
+
+  Returns:
+      A tensor, result of 1D convolution.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
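+
+  Example (an illustrative sketch, assuming the default `channels_last`
+  data format; shapes are chosen for demonstration):
+
+  ```python
+      import numpy as np
+      from tensorflow.keras import backend as K
+      x = K.constant(np.random.rand(2, 10, 4))      # (batch, steps, channels)
+      kernel = K.constant(np.random.rand(3, 4, 8))  # (width, in_ch, out_ch)
+      y = K.conv1d(x, kernel, strides=1, padding='same')
+      print(K.int_shape(y))  # (2, 10, 8)
+  ```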
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  kernel_shape = kernel.shape.as_list()
+  if padding == 'causal':
+    # causal (dilated) convolution:
+    left_pad = dilation_rate * (kernel_shape[0] - 1)
+    x = temporal_padding(x, (left_pad, 0))
+    padding = 'valid'
+  padding = _preprocess_padding(padding)
+
+  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
+  x = nn.convolution(
+      input=x,
+      filter=kernel,
+      dilation_rate=dilation_rate,
+      strides=strides,
+      padding=padding,
+      data_format=tf_data_format)
+  if data_format == 'channels_first' and tf_data_format == 'NWC':
+    x = array_ops.transpose(x, (0, 2, 1))  # NWC -> NCW
+  return x
+
+
+@keras_export('keras.backend.conv2d')
+def conv2d(x,
+           kernel,
+           strides=(1, 1),
+           padding='valid',
+           data_format=None,
+           dilation_rate=(1, 1)):
+  """2D convolution.
+
+  Arguments:
+      x: Tensor or variable.
+      kernel: kernel tensor.
+      strides: strides tuple.
+      padding: string, `"same"` or `"valid"`.
+      data_format: `"channels_last"` or `"channels_first"`.
+      dilation_rate: tuple of 2 integers.
+
+  Returns:
+      A tensor, result of 2D convolution.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  x = nn.convolution(
+      input=x,
+      filter=kernel,
+      dilation_rate=dilation_rate,
+      strides=strides,
+      padding=padding,
+      data_format=tf_data_format)
+  if data_format == 'channels_first' and tf_data_format == 'NHWC':
+    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
+  return x
+
+
+@keras_export('keras.backend.conv2d_transpose')
+def conv2d_transpose(x,
+                     kernel,
+                     output_shape,
+                     strides=(1, 1),
+                     padding='valid',
+                     data_format=None,
+                     dilation_rate=(1, 1)):
+  """2D deconvolution (i.e. transposed convolution).
+
+  Arguments:
+      x: Tensor or variable.
+      kernel: kernel tensor.
+      output_shape: 1D int tensor for the output shape.
+      strides: strides tuple.
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      dilation_rate: Tuple of 2 integers.
+
+  Returns:
+      A tensor, result of transposed 2D convolution.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
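+  # We therefore force the NCHW -> NHWC transpose below whenever a dilated
+  # transposed convolution is requested in `channels_first` mode.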
+  if data_format == 'channels_first' and dilation_rate != (1, 1):
+    force_transpose = True
+  else:
+    force_transpose = False
+
+  x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
+
+  if data_format == 'channels_first' and tf_data_format == 'NHWC':
+    output_shape = (output_shape[0], output_shape[2], output_shape[3],
+                    output_shape[1])
+  if output_shape[0] is None:
+    output_shape = (shape(x)[0],) + tuple(output_shape[1:])
+
+  if isinstance(output_shape, (tuple, list)):
+    output_shape = array_ops.stack(list(output_shape))
+
+  padding = _preprocess_padding(padding)
+  if tf_data_format == 'NHWC':
+    strides = (1,) + strides + (1,)
+  else:
+    strides = (1, 1) + strides
+
+  if dilation_rate == (1, 1):
+    x = nn.conv2d_transpose(x, kernel, output_shape, strides,
+                            padding=padding,
+                            data_format=tf_data_format)
+  else:
+    assert dilation_rate[0] == dilation_rate[1]
+    x = nn.atrous_conv2d_transpose(
+        x,
+        kernel,
+        output_shape,
+        rate=dilation_rate[0],
+        padding=padding)
+  if data_format == 'channels_first' and tf_data_format == 'NHWC':
+    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
+  return x
+
+
+def separable_conv1d(x,
+                     depthwise_kernel,
+                     pointwise_kernel,
+                     strides=1,
+                     padding='valid',
+                     data_format=None,
+                     dilation_rate=1):
+  """1D convolution with separable filters.
+
+  Arguments:
+      x: input tensor
+      depthwise_kernel: convolution kernel for the depthwise convolution.
+      pointwise_kernel: kernel for the 1x1 convolution.
+      strides: stride integer.
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      dilation_rate: integer dilation rate.
+
+  Returns:
+      Output tensor.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  if isinstance(strides, int):
+    strides = (strides,)
+  if isinstance(dilation_rate, int):
+    dilation_rate = (dilation_rate,)
+
+  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  if not isinstance(strides, tuple):
+    strides = tuple(strides)
+  if tf_data_format == 'NWC':
+    spatial_start_dim = 1
+    strides = (1,) + strides * 2 + (1,)
+  else:
+    spatial_start_dim = 2
+    strides = (1, 1) + strides * 2
+  x = array_ops.expand_dims(x, spatial_start_dim)
+  depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
+  pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
+  dilation_rate = (1,) + dilation_rate
+
+  x = nn.separable_conv2d(
+      x,
+      depthwise_kernel,
+      pointwise_kernel,
+      strides=strides,
+      padding=padding,
+      rate=dilation_rate,
+      data_format=tf_data_format)
+
+  x = array_ops.squeeze(x, [spatial_start_dim])
+
+  if data_format == 'channels_first' and tf_data_format == 'NWC':
+    x = array_ops.transpose(x, (0, 2, 1))  # NWC -> NCW
+
+  return x
+
+
+@keras_export('keras.backend.separable_conv2d')
+def separable_conv2d(x,
+                     depthwise_kernel,
+                     pointwise_kernel,
+                     strides=(1, 1),
+                     padding='valid',
+                     data_format=None,
+                     dilation_rate=(1, 1)):
+  """2D convolution with separable filters.
+
+  Arguments:
+      x: input tensor
+      depthwise_kernel: convolution kernel for the depthwise convolution.
+      pointwise_kernel: kernel for the 1x1 convolution.
+      strides: strides tuple (length 2).
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      dilation_rate: tuple of integers,
+          dilation rates for the separable convolution.
+
+  Returns:
+      Output tensor.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
+      ValueError: if `strides` is not a tuple of 2 integers.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+  if len(strides) != 2:
+    raise ValueError('`strides` must be a tuple of 2 integers.')
+
+  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  if not isinstance(strides, tuple):
+    strides = tuple(strides)
+  if tf_data_format == 'NHWC':
+    strides = (1,) + strides + (1,)
+  else:
+    strides = (1, 1) + strides
+
+  x = nn.separable_conv2d(
+      x,
+      depthwise_kernel,
+      pointwise_kernel,
+      strides=strides,
+      padding=padding,
+      rate=dilation_rate,
+      data_format=tf_data_format)
+  if data_format == 'channels_first' and tf_data_format == 'NHWC':
+    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
+  return x
+
+
+def depthwise_conv2d(x,
+                     depthwise_kernel,
+                     strides=(1, 1),
+                     padding='valid',
+                     data_format=None,
+                     dilation_rate=(1, 1)):
+  """2D depthwise convolution (one filter per input channel).
+
+  Arguments:
+      x: input tensor
+      depthwise_kernel: convolution kernel for the depthwise convolution.
+      strides: strides tuple (length 2).
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      dilation_rate: tuple of integers,
+          dilation rates for the depthwise convolution.
+
+  Returns:
+      Output tensor.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  if tf_data_format == 'NHWC':
+    strides = (1,) + strides + (1,)
+  else:
+    strides = (1, 1) + strides
+
+  x = nn.depthwise_conv2d(
+      x,
+      depthwise_kernel,
+      strides=strides,
+      padding=padding,
+      rate=dilation_rate,
+      data_format=tf_data_format)
+  if data_format == 'channels_first' and tf_data_format == 'NHWC':
+    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
+  return x
+
+
+@keras_export('keras.backend.conv3d')
+def conv3d(x,
+           kernel,
+           strides=(1, 1, 1),
+           padding='valid',
+           data_format=None,
+           dilation_rate=(1, 1, 1)):
+  """3D convolution.
+
+  Arguments:
+      x: Tensor or variable.
+      kernel: kernel tensor.
+      strides: strides tuple.
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      dilation_rate: tuple of 3 integers.
+
+  Returns:
+      A tensor, result of 3D convolution.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
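+
+  Example (an illustrative sketch, assuming the default `channels_last`
+  data format):
+
+  ```python
+      import numpy as np
+      from tensorflow.keras import backend as K
+      x = K.constant(np.random.rand(1, 8, 8, 8, 2))       # (batch, d1, d2, d3, ch)
+      kernel = K.constant(np.random.rand(3, 3, 3, 2, 4))  # (kd, kh, kw, in, out)
+      y = K.conv3d(x, kernel, padding='valid')
+      print(K.int_shape(y))  # (1, 6, 6, 6, 4)
+  ```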
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  x = nn.convolution(
+      input=x,
+      filter=kernel,
+      dilation_rate=dilation_rate,
+      strides=strides,
+      padding=padding,
+      data_format=tf_data_format)
+  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
+    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
+  return x
+
+
+def conv3d_transpose(x,
+                     kernel,
+                     output_shape,
+                     strides=(1, 1, 1),
+                     padding='valid',
+                     data_format=None):
+  """3D deconvolution (i.e. transposed convolution).
+
+  Arguments:
+      x: input tensor.
+      kernel: kernel tensor.
+      output_shape: 1D int tensor for the output shape.
+      strides: strides tuple.
+      padding: string, "same" or "valid".
+      data_format: string, `"channels_last"` or `"channels_first"`.
+
+  Returns:
+      A tensor, result of transposed 3D convolution.
+
+  Raises:
+      ValueError: if `data_format` is neither `channels_last` nor
+          `channels_first`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+  if isinstance(output_shape, (tuple, list)):
+    output_shape = array_ops.stack(output_shape)
+
+  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
+
+  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
+    output_shape = (output_shape[0], output_shape[2], output_shape[3],
+                    output_shape[4], output_shape[1])
+  if output_shape[0] is None:
+    output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
+    output_shape = array_ops.stack(list(output_shape))
+
+  padding = _preprocess_padding(padding)
+  if tf_data_format == 'NDHWC':
+    strides = (1,) + strides + (1,)
+  else:
+    strides = (1, 1) + strides
+
+  x = nn.conv3d_transpose(
+      x,
+      kernel,
+      output_shape,
+      strides,
+      padding=padding,
+      data_format=tf_data_format)
+  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
+    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
+  return x
+
+
+@keras_export('keras.backend.pool2d')
+def pool2d(x,
+           pool_size,
+           strides=(1, 1),
+           padding='valid',
+           data_format=None,
+           pool_mode='max'):
+  """2D Pooling.
+
+  Arguments:
+      x: Tensor or variable.
+      pool_size: tuple of 2 integers.
+      strides: tuple of 2 integers.
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      pool_mode: string, `"max"` or `"avg"`.
+
+  Returns:
+      A tensor, result of 2D pooling.
+
+  Raises:
+      ValueError: if `data_format` is neither `"channels_last"` nor
+          `"channels_first"`.
+      ValueError: if `pool_size` is not a tuple of 2 integers.
+      ValueError: if `strides` is not a tuple of 2 integers.
+      ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
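+
+  Example (an illustrative sketch, assuming the default `channels_last`
+  data format):
+
+  ```python
+      import numpy as np
+      from tensorflow.keras import backend as K
+      x = K.constant(np.random.rand(1, 4, 4, 3))
+      y = K.pool2d(x, pool_size=(2, 2), strides=(2, 2), pool_mode='max')
+      print(K.int_shape(y))  # (1, 2, 2, 3)
+  ```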
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+  if len(pool_size) != 2:
+    raise ValueError('`pool_size` must be a tuple of 2 integers.')
+  if len(strides) != 2:
+    raise ValueError('`strides` must be a tuple of 2 integers.')
+
+  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  if tf_data_format == 'NHWC':
+    strides = (1,) + strides + (1,)
+    pool_size = (1,) + pool_size + (1,)
+  else:
+    strides = (1, 1) + strides
+    pool_size = (1, 1) + pool_size
+
+  if pool_mode == 'max':
+    x = nn.max_pool(
+        x, pool_size, strides, padding=padding, data_format=tf_data_format)
+  elif pool_mode == 'avg':
+    x = nn.avg_pool(
+        x, pool_size, strides, padding=padding, data_format=tf_data_format)
+  else:
+    raise ValueError('Invalid pooling mode: ' + str(pool_mode))
+
+  if data_format == 'channels_first' and tf_data_format == 'NHWC':
+    x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
+  return x
+
+
+@keras_export('keras.backend.pool3d')
+def pool3d(x,
+           pool_size,
+           strides=(1, 1, 1),
+           padding='valid',
+           data_format=None,
+           pool_mode='max'):
+  """3D Pooling.
+
+  Arguments:
+      x: Tensor or variable.
+      pool_size: tuple of 3 integers.
+      strides: tuple of 3 integers.
+      padding: string, `"same"` or `"valid"`.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+      pool_mode: string, `"max"` or `"avg"`.
+
+  Returns:
+      A tensor, result of 3D pooling.
+
+  Raises:
+      ValueError: if `data_format` is neither `"channels_last"` nor
+          `"channels_first"`.
+      ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
+  padding = _preprocess_padding(padding)
+  if tf_data_format == 'NDHWC':
+    strides = (1,) + strides + (1,)
+    pool_size = (1,) + pool_size + (1,)
+  else:
+    strides = (1, 1) + strides
+    pool_size = (1, 1) + pool_size
+
+  if pool_mode == 'max':
+    x = nn.max_pool3d(
+        x, pool_size, strides, padding=padding, data_format=tf_data_format)
+  elif pool_mode == 'avg':
+    x = nn.avg_pool3d(
+        x, pool_size, strides, padding=padding, data_format=tf_data_format)
+  else:
+    raise ValueError('Invalid pooling mode: ' + str(pool_mode))
+
+  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
+    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
+  return x
+
+
+def local_conv(inputs,
+               kernel,
+               kernel_size,
+               strides,
+               output_shape,
+               data_format=None):
+  """Apply N-D convolution with un-shared weights.
+
+  Arguments:
+      inputs: (N+2)-D tensor with shape
+          (batch_size, channels_in, d_in1, ..., d_inN)
+          if data_format='channels_first', or
+          (batch_size, d_in1, ..., d_inN, channels_in)
+          if data_format='channels_last'.
+      kernel: the unshared weight for N-D convolution,
+          with shape (output_items, feature_dim, channels_out), where
+          feature_dim = np.prod(kernel_size) * channels_in,
+          output_items = np.prod(output_shape).
+      kernel_size: a tuple of N integers, specifying the
+          spatial dimensions of the N-D convolution window.
+      strides: a tuple of N integers, specifying the strides
+          of the convolution along the spatial dimensions.
+      output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
+          dimensionality of the output.
+      data_format: string, "channels_first" or "channels_last".
+
+  Returns:
+      An (N+2)-D tensor with shape:
+      (batch_size, channels_out) + output_shape
+      if data_format='channels_first', or:
+      (batch_size,) + output_shape + (channels_out,)
+      if data_format='channels_last'.
+
+  Raises:
+      ValueError: if `data_format` is neither
+      `channels_last` nor `channels_first`.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+
+  kernel_shape = int_shape(kernel)
+  feature_dim = kernel_shape[1]
+  channels_out = kernel_shape[-1]
+  ndims = len(output_shape)
+  spatial_dimensions = list(range(ndims))
+
+  xs = []
+  output_axes_ticks = [range(axis_max) for axis_max in output_shape]
+  for position in itertools.product(*output_axes_ticks):
+    slices = [slice(None)]
+
+    if data_format == 'channels_first':
+      slices.append(slice(None))
+
+    slices.extend([slice(position[d] * strides[d],
+                         position[d] * strides[d] + kernel_size[d])
+                   for d in spatial_dimensions])
+
+    if data_format == 'channels_last':
+      slices.append(slice(None))
+
+    xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
+
+  x_aggregate = concatenate(xs, axis=0)
+  output = batch_dot(x_aggregate, kernel)
+  output = reshape(output, output_shape + (-1, channels_out))
+
+  if data_format == 'channels_first':
+    permutation = [ndims, ndims + 1] + spatial_dimensions
+  else:
+    permutation = [ndims] + spatial_dimensions + [ndims + 1]
+
+  return permute_dimensions(output, permutation)
+
+
+@keras_export('keras.backend.local_conv1d')
+def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
+  """Apply 1D conv with un-shared weights.
+
+  Arguments:
+      inputs: 3D tensor with shape:
+          (batch_size, steps, input_dim)
+          if data_format is "channels_last" or
+          (batch_size, input_dim, steps)
+          if data_format is "channels_first".
+      kernel: the unshared weight for convolution,
+          with shape (output_length, feature_dim, filters).
+      kernel_size: a tuple of a single integer,
+          specifying the length of the 1D convolution window.
+      strides: a tuple of a single integer,
+          specifying the stride length of the convolution.
+      data_format: the data format, channels_first or channels_last.
+
+  Returns:
+      A 3D tensor with shape:
+      (batch_size, filters, output_length)
+      if data_format='channels_first'
+      or a 3D tensor with shape:
+      (batch_size, output_length, filters)
+      if data_format='channels_last'.
+  """
+  output_shape = (kernel.shape[0],)
+  return local_conv(inputs,
+                    kernel,
+                    kernel_size,
+                    strides,
+                    output_shape,
+                    data_format)
+
+
+@keras_export('keras.backend.local_conv2d')
+def local_conv2d(inputs,
+                 kernel,
+                 kernel_size,
+                 strides,
+                 output_shape,
+                 data_format=None):
+  """Apply 2D conv with un-shared weights.
+
+  Arguments:
+      inputs: 4D tensor with shape:
+          (batch_size, channels_in, rows, cols)
+          if data_format='channels_first'
+          or 4D tensor with shape:
+          (batch_size, rows, cols, channels_in)
+          if data_format='channels_last'.
+      kernel: the unshared weight for convolution,
+          with shape (output_items, feature_dim, filters).
+      kernel_size: a tuple of 2 integers, specifying the
+          width and height of the 2D convolution window.
+      strides: a tuple of 2 integers, specifying the strides
+          of the convolution along the width and height.
+      output_shape: a tuple with (output_row, output_col).
+      data_format: the data format, channels_first or channels_last.
+
+  Returns:
+      A 4D tensor with shape:
+      (batch_size, filters, new_rows, new_cols)
+      if data_format='channels_first'
+      or 4D tensor with shape:
+      (batch_size, new_rows, new_cols, filters)
+      if data_format='channels_last'.
+  """
+  return local_conv(inputs,
+                    kernel,
+                    kernel_size,
+                    strides,
+                    output_shape,
+                    data_format)
+
+
+@keras_export('keras.backend.bias_add')
+def bias_add(x, bias, data_format=None):
+  """Adds a bias vector to a tensor.
+
+  Arguments:
+      x: Tensor or variable.
+      bias: Bias tensor to add.
+      data_format: string, `"channels_last"` or `"channels_first"`.
+
+  Returns:
+      Output tensor.
+
+  Raises:
+      ValueError: In one of the two cases below:
+          1. invalid `data_format` argument.
+          2. invalid bias shape.
+             The bias should be either a vector or
+             a tensor with ndim(x) - 1 dimensions.
+  """
+  if data_format is None:
+    data_format = image_data_format()
+  if data_format not in {'channels_first', 'channels_last'}:
+    raise ValueError('Unknown data_format: ' + str(data_format))
+  bias_shape = int_shape(bias)
+  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
+    raise ValueError(
+        'Unexpected bias dimensions %d, expected to be 1 or %d dimensions' %
+        (len(bias_shape), ndim(x)))
+  # pylint: disable=g-no-augmented-assignment
+  if ndim(x) == 5:
+    if data_format == 'channels_first':
+      if len(bias_shape) == 1:
+        x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
+      else:
+        x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
+    elif data_format == 'channels_last':
+      if len(bias_shape) == 1:
+        x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
+      else:
+        x = x + reshape(bias, (1,) + bias_shape)
+  elif ndim(x) == 4:
+    if data_format == 'channels_first':
+      if len(bias_shape) == 1:
+        if _has_nchw_support():
+          x = nn.bias_add(x, bias, data_format='NCHW')
+        else:
+          x = x + reshape(bias, (1, bias_shape[0], 1, 1))
+      else:
+        x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
+    elif data_format == 'channels_last':
+      if len(bias_shape) == 1:
+        x = nn.bias_add(x, bias, data_format='NHWC')
+      else:
+        x = x + reshape(bias, (1,) + bias_shape)
+  elif ndim(x) == 3:
+    if data_format == 'channels_first':
+      if len(bias_shape) == 1:
+        x = x + reshape(bias, (1, bias_shape[0], 1))
+      else:
+        x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
+    elif data_format == 'channels_last':
+      if len(bias_shape) == 1:
+        x = x + reshape(bias, (1, 1, bias_shape[0]))
+      else:
+        x = x + reshape(bias, (1,) + bias_shape)
+  else:
+    x = nn.bias_add(x, bias)
+  # pylint: enable=g-no-augmented-assignment
+  return x
+
+
+# RANDOMNESS
+
+
+@keras_export('keras.backend.random_normal')
+def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
+  """Returns a tensor with normal distribution of values.
+
+  Arguments:
+      shape: A tuple of integers, the shape of tensor to create.
+      mean: A float, mean of the normal distribution to draw samples.
+      stddev: A float, standard deviation of the normal distribution
+          to draw samples.
+      dtype: String, dtype of returned tensor.
+      seed: Integer, random seed.
+
+  Returns:
+      A tensor.
+  """
+  if dtype is None:
+    dtype = floatx()
+  if seed is None:
+    seed = np.random.randint(10e6)
+  return random_ops.random_normal(
+      shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
+
+
+@keras_export('keras.backend.random_uniform')
+def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
+  """Returns a tensor with uniform distribution of values.
+
+  Arguments:
+      shape: A tuple of integers, the shape of tensor to create.
+      minval: A float, lower boundary of the uniform distribution
+          to draw samples.
+      maxval: A float, upper boundary of the uniform distribution
+          to draw samples.
+      dtype: String, dtype of returned tensor.
+      seed: Integer, random seed.
+
+  Returns:
+      A tensor.
+  """
+  if dtype is None:
+    dtype = floatx()
+  if seed is None:
+    seed = np.random.randint(10e6)
+  return random_ops.random_uniform(
+      shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
+
+
+@keras_export('keras.backend.random_binomial')
+def random_binomial(shape, p=0.0, dtype=None, seed=None):
+  """Returns a tensor with random binomial distribution of values.
+
+  The binomial distribution with parameters `n` and `p` is the probability
+  distribution of the number of successes in a sequence of `n` independent
+  Bernoulli trials, each with success probability `p`. Only `n = 1` is
+  supported for now.
+
+  Arguments:
+      shape: A tuple of integers, the shape of tensor to create.
+      p: A float, `0. <= p <= 1`, probability of binomial distribution.
+      dtype: String, dtype of returned tensor.
+      seed: Integer, random seed.
+
+  Returns:
+      A tensor.
+  """
+  if dtype is None:
+    dtype = floatx()
+  if seed is None:
+    seed = np.random.randint(10e6)
+  return array_ops.where(
+      random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
+      array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
+
+
+@keras_export('keras.backend.truncated_normal')
+def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
+  """Returns a tensor with truncated random normal distribution of values.
+
+  The generated values follow a normal distribution
+  with specified mean and standard deviation,
+  except that values whose magnitude is more than
+  two standard deviations from the mean are dropped and re-picked.
+
+  Arguments:
+      shape: A tuple of integers, the shape of tensor to create.
+      mean: Mean of the values.
+      stddev: Standard deviation of the values.
+      dtype: String, dtype of returned tensor.
+      seed: Integer, random seed.
+
+  Returns:
+      A tensor.
+  """
+  if dtype is None:
+    dtype = floatx()
+  if seed is None:
+    seed = np.random.randint(10e6)
+  return random_ops.truncated_normal(
+      shape, mean, stddev, dtype=dtype, seed=seed)
+
+
+# CTC
+# TensorFlow has a native implementation, but it uses sparse tensors
+# and therefore requires a wrapper for Keras. The functions below convert
+# dense to sparse tensors and also wrap up the beam search code that is
+# in TensorFlow's CTC implementation.
+
+
+@keras_export('keras.backend.ctc_label_dense_to_sparse')
+def ctc_label_dense_to_sparse(labels, label_lengths):
+  """Converts CTC labels from dense to sparse.
+
+  Arguments:
+      labels: dense CTC labels.
+      label_lengths: length of the labels.
+
+  Returns:
+      A sparse tensor representation of the labels.
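+
+  Example (an illustrative sketch; entries beyond each sample's length are
+  treated as padding):
+
+  ```python
+      from tensorflow.keras import backend as K
+      labels = K.constant([[1, 2, 0], [3, 0, 0]], dtype='int32')
+      lengths = K.constant([2, 1], dtype='int32')
+      sparse = K.ctc_label_dense_to_sparse(labels, lengths)
+      # sparse.indices -> [[0, 0], [0, 1], [1, 0]]
+      # sparse.values  -> [1, 2, 3]
+  ```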
+ """ + label_shape = array_ops.shape(labels) + num_batches_tns = array_ops.stack([label_shape[0]]) + max_num_labels_tns = array_ops.stack([label_shape[1]]) + + def range_less_than(old_input, current_input): + return array_ops.expand_dims( + math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill( + max_num_labels_tns, current_input) + + init = math_ops.cast( + array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool) + dense_mask = functional_ops.scan( + range_less_than, label_lengths, initializer=init, parallel_iterations=1) + dense_mask = dense_mask[:, 0, :] + + label_array = array_ops.reshape( + array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns), + label_shape) + label_ind = array_ops.boolean_mask(label_array, dense_mask) + + batch_array = array_ops.transpose( + array_ops.reshape( + array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns), + reverse(label_shape, 0))) + batch_ind = array_ops.boolean_mask(batch_array, dense_mask) + indices = array_ops.transpose( + array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])) + + vals_sparse = array_ops.gather_nd(labels, indices) + + return sparse_tensor.SparseTensor( + math_ops.cast(indices, dtypes_module.int64), vals_sparse, + math_ops.cast(label_shape, dtypes_module.int64)) + + +@keras_export('keras.backend.ctc_batch_cost') +def ctc_batch_cost(y_true, y_pred, input_length, label_length): + """Runs CTC loss algorithm on each batch element. + + Arguments: + y_true: tensor `(samples, max_string_length)` + containing the truth labels. + y_pred: tensor `(samples, time_steps, num_categories)` + containing the prediction, or output of the softmax. + input_length: tensor `(samples, 1)` containing the sequence length for + each batch item in `y_pred`. + label_length: tensor `(samples, 1)` containing the sequence length for + each batch item in `y_true`. + + Returns: + Tensor with shape (samples,1) containing the + CTC loss of each element. + """ + label_length = math_ops.cast( + array_ops.squeeze(label_length, axis=-1), dtypes_module.int32) + input_length = math_ops.cast( + array_ops.squeeze(input_length, axis=-1), dtypes_module.int32) + sparse_labels = math_ops.cast( + ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32) + + y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon()) + + return array_ops.expand_dims( + ctc.ctc_loss( + inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1) + + +@keras_export('keras.backend.ctc_decode') +def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1): + """Decodes the output of a softmax. + + Can use either greedy search (also known as best path) + or a constrained dictionary search. + + Arguments: + y_pred: tensor `(samples, time_steps, num_categories)` + containing the prediction, or output of the softmax. + input_length: tensor `(samples, )` containing the sequence length for + each batch item in `y_pred`. + greedy: perform much faster best-path search if `true`. + This does not use a dictionary. + beam_width: if `greedy` is `false`: a beam search decoder will be used + with a beam of this width. + top_paths: if `greedy` is `false`, + how many of the most probable paths will be returned. + + Returns: + Tuple: + List: if `greedy` is `true`, returns a list of one element that + contains the decoded sequence. + If `false`, returns the `top_paths` most probable + decoded sequences. + Important: blank labels are returned as `-1`. 
+ Tensor `(top_paths, )` that contains + the log probability of each decoded sequence. + """ + y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon()) + input_length = math_ops.cast(input_length, dtypes_module.int32) + + if greedy: + (decoded, log_prob) = ctc.ctc_greedy_decoder( + inputs=y_pred, sequence_length=input_length) + else: + (decoded, log_prob) = ctc.ctc_beam_search_decoder( + inputs=y_pred, + sequence_length=input_length, + beam_width=beam_width, + top_paths=top_paths) + decoded_dense = [ + sparse_ops.sparse_to_dense( + st.indices, st.dense_shape, st.values, default_value=-1) + for st in decoded + ] + return (decoded_dense, log_prob) + + +# HIGH ORDER FUNCTIONS + + +@keras_export('keras.backend.map_fn') +def map_fn(fn, elems, name=None, dtype=None): + """Map the function fn over the elements elems and return the outputs. + + Arguments: + fn: Callable that will be called upon each element in elems + elems: tensor + name: A string name for the map node in the graph + dtype: Output data type. + + Returns: + Tensor with dtype `dtype`. + """ + return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype) + + +@keras_export('keras.backend.foldl') +def foldl(fn, elems, initializer=None, name=None): + """Reduce elems using fn to combine them from left to right. + + Arguments: + fn: Callable that will be called upon each element in elems and an + accumulator, for instance `lambda acc, x: acc + x` + elems: tensor + initializer: The first value used (`elems[0]` in case of None) + name: A string name for the foldl node in the graph + + Returns: + Tensor with same type and shape as `initializer`. + """ + return functional_ops.foldl(fn, elems, initializer=initializer, name=name) + + +@keras_export('keras.backend.foldr') +def foldr(fn, elems, initializer=None, name=None): + """Reduce elems using fn to combine them from right to left. + + Arguments: + fn: Callable that will be called upon each element in elems and an + accumulator, for instance `lambda acc, x: acc + x` + elems: tensor + initializer: The first value used (`elems[-1]` in case of None) + name: A string name for the foldr node in the graph + + Returns: + Same type and shape as initializer + """ + return functional_ops.foldr(fn, elems, initializer=initializer, name=name) + +# Load Keras default configuration from config file if present. +# Set Keras base dir path given KERAS_HOME env variable, if applicable. +# Otherwise either ~/.keras or /tmp. +if 'KERAS_HOME' in os.environ: + _keras_dir = os.environ.get('KERAS_HOME') +else: + _keras_base_dir = os.path.expanduser('~') + _keras_dir = os.path.join(_keras_base_dir, '.keras') +_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json')) +if os.path.exists(_config_path): + try: + _config = json.load(open(_config_path)) + except ValueError: + _config = {} + _floatx = _config.get('floatx', floatx()) + assert _floatx in {'float16', 'float32', 'float64'} + _epsilon = _config.get('epsilon', epsilon()) + assert isinstance(_epsilon, float) + _image_data_format = _config.get('image_data_format', image_data_format()) + assert _image_data_format in {'channels_last', 'channels_first'} + set_floatx(_floatx) + set_epsilon(_epsilon) + set_image_data_format(_image_data_format) + +# Save config file. +if not os.path.exists(_keras_dir): + try: + os.makedirs(_keras_dir) + except OSError: + # Except permission denied and potential race conditions + # in multi-threaded environments. 
+ pass + +if not os.path.exists(_config_path): + _config = { + 'floatx': floatx(), + 'epsilon': epsilon(), + 'backend': 'tensorflow', + 'image_data_format': image_data_format() + } + try: + with open(_config_path, 'w') as f: + f.write(json.dumps(_config, indent=4)) + except IOError: + # Except permission denied. + pass + + +def configure_and_create_distributed_session(distribution_strategy): + """Configure session config and create a session with it.""" + + def _create_session(distribution_strategy): + """Create the Distributed Strategy session.""" + session_config = get_default_session_config() + + # If a session already exists, merge in its config; in the case there is a + # conflict, take values of the existing config. + global _SESSION + if getattr(_SESSION, 'session', None) and _SESSION.session._config: + session_config.MergeFrom(_SESSION.session._config) + + if is_tpu_strategy(distribution_strategy): + # TODO(priyag, yuefengz): Remove this workaround when Distribute + # Coordinator is integrated with keras and we can create a session from + # there. + distribution_strategy.configure(session_config) + master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access + session = session_module.Session(config=session_config, target=master) + else: + worker_context = dc_context.get_current_worker_context() + if worker_context: + dc_session_config = worker_context.session_config + # Merge the default session config to the one from distribute + # coordinator, which is fine for now since they don't have + # conflicting configurations. + dc_session_config.MergeFrom(session_config) + session = session_module.Session( + config=dc_session_config, target=worker_context.master_target) + else: + distribution_strategy.configure(session_config) + session = session_module.Session(config=session_config) + + set_session(session) + + if multi_worker_util.in_multi_worker_mode(): + dc.run_distribute_coordinator( + _create_session, + distribution_strategy, + mode=dc.CoordinatorMode.INDEPENDENT_WORKER) + else: + _create_session(distribution_strategy) + + +def is_tpu_strategy(strategy): + """We're executing TPU Strategy.""" + return (strategy is not None and + strategy.__class__.__name__.startswith('TPUStrategy')) + + +def cast_variables_to_tensor(tensors): + + def _cast_variables_to_tensor(tensor): + if isinstance(tensor, variables_module.Variable): + return array_ops.identity(tensor) + return tensor + + return nest.map_structure(_cast_variables_to_tensor, tensors) + + +def _is_symbolic_tensor(x): + return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor) \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_losses.py b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_losses.py new file mode 100644 index 000000000..f04500fcf --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/my_losses.py @@ -0,0 +1,1197 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Built-in loss functions. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc + +import six + +from tensorflow.python.distribute import distribution_strategy_context +from tensorflow.python.framework import ops +from tensorflow.python.framework import smart_cond +from tensorflow.python.framework import tensor_util +from tensorflow.python.keras import backend as K +import my_backend +from tensorflow.python.keras.utils import losses_utils +from tensorflow.python.keras.utils import tf_utils +from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object +from tensorflow.python.keras.utils.generic_utils import serialize_keras_object +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops.losses import losses_impl +from tensorflow.python.ops.losses import util as tf_losses_util +from tensorflow.python.util.tf_export import keras_export +from tensorflow.tools.docs import doc_controls + + +@keras_export('keras.losses.Loss') +class Loss(object): + """Loss base class. + + To be implemented by subclasses: + * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`. + + Example subclass implementation: + ``` + class MeanSquaredError(Loss): + def call(self, y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + return K.mean(math_ops.square(y_pred - y_true), axis=-1) + ``` + + When used with `tf.distribute.Strategy`, outside of built-in training loops + such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction + types, and reduce losses explicitly in your training loop. Using 'AUTO' or + 'SUM_OVER_BATCH_SIZE' will raise an error. + + Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops for more + details on this. + + You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like: + ``` + with strategy.scope(): + loss_obj = tf.keras.losses.CategoricalCrossentropy( + reduction=tf.keras.losses.Reduction.NONE) + .... + loss = (tf.reduce_sum(loss_obj(labels, predictions)) * + (1. / global_batch_size)) + ``` + + Args: + reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. + Default value is `AUTO`. `AUTO` indicates that the reduction option will + be determined by the usage context. For almost all cases this defaults to + `SUM_OVER_BATCH_SIZE`. + When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: Optional name for the op. + """ + + def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None): + losses_utils.ReductionV2.validate(reduction) + self.reduction = reduction + self.name = name + + def __call__(self, y_true, y_pred, sample_weight=None): + """Invokes the `Loss` instance. + + Args: + y_true: Ground truth values. shape = `[batch_size, d0, .. dN]` + y_pred: The predicted values. shape = `[batch_size, d0, .. dN]` + sample_weight: Optional `sample_weight` acts as a + coefficient for the loss. 
If a scalar is provided, then the loss is
+        simply scaled by the given value. If `sample_weight` is a tensor of size
+        `[batch_size]`, then the total loss for each sample of the batch is
+        rescaled by the corresponding element in the `sample_weight` vector. If
+        the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
+        broadcasted to this shape), then each loss element of `y_pred` is scaled
+        by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
+        functions reduce by 1 dimension, usually axis=-1.)
+
+    Returns:
+      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
+        shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
+        because all loss functions reduce by 1 dimension, usually axis=-1.)
+
+    Raises:
+      ValueError: If the shape of `sample_weight` is invalid.
+    """
+    # If we are wrapping a lambda function strip '<>' from the name as it is not
+    # accepted in scope name.
+    scope_name = 'lambda' if self.name == '<lambda>' else self.name
+    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
+        y_true, y_pred, sample_weight)
+    with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
+      losses = self.call(y_true, y_pred)
+      return losses_utils.compute_weighted_loss(
+          losses, sample_weight, reduction=self._get_reduction())
+
+  @classmethod
+  def from_config(cls, config):
+    """Instantiates a `Loss` from its config (output of `get_config()`).
+
+    Args:
+        config: Output of `get_config()`.
+
+    Returns:
+        A `Loss` instance.
+    """
+    return cls(**config)
+
+  def get_config(self):
+    return {'reduction': self.reduction, 'name': self.name}
+
+  @abc.abstractmethod
+  @doc_controls.for_subclass_implementers
+  def call(self, y_true, y_pred):
+    """Invokes the `Loss` instance.
+
+    Args:
+      y_true: Ground truth values, with the same shape as 'y_pred'.
+      y_pred: The predicted values.
+    """
+    raise NotImplementedError('Must be implemented in subclasses.')
+
+  def _get_reduction(self):
+    """Handles `AUTO` reduction cases and returns the reduction value."""
+    if distribution_strategy_context.has_strategy() and (
+        self.reduction == losses_utils.ReductionV2.AUTO or
+        self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):
+      raise ValueError(
+          'Please use `tf.keras.losses.Reduction.SUM` or '
+          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
+          'used with `tf.distribute.Strategy` outside of the built-in training '
+          'loops. You can implement '
+          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
+          'size like:\n```\nwith strategy.scope():\n'
+          '    loss_obj = tf.keras.losses.CategoricalCrossentropy('
+          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
+          '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
+          '(1. / global_batch_size)\n```\nPlease see '
+          'https://www.tensorflow.org/alpha/tutorials/distribute/training_loops'
+          ' for more details.')
+
+    if self.reduction == losses_utils.ReductionV2.AUTO:
+      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
+    return self.reduction
+
+
+class LossFunctionWrapper(Loss):
+  """Wraps a loss function in the `Loss` class.
+
+  Args:
+    fn: The loss function to wrap, with signature `fn(y_true, y_pred,
+      **kwargs)`.
+    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
+      Default value is `AUTO`. `AUTO` indicates that the reduction option will
+      be determined by the usage context. For almost all cases this defaults to
+      `SUM_OVER_BATCH_SIZE`.
+ When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: (Optional) name for the loss. + **kwargs: The keyword arguments that are passed on to `fn`. + """ + + def __init__(self, + fn, + reduction=losses_utils.ReductionV2.AUTO, + name=None, + **kwargs): + super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name) + self.fn = fn + self._fn_kwargs = kwargs + + def call(self, y_true, y_pred): + """Invokes the `LossFunctionWrapper` instance. + + Args: + y_true: Ground truth values. + y_pred: The predicted values. + + Returns: + Loss values per sample. + """ + if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true): + y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions( + y_pred, y_true) + return self.fn(y_true, y_pred, **self._fn_kwargs) + + def get_config(self): + config = {} + for k, v in six.iteritems(self._fn_kwargs): + config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v + base_config = super(LossFunctionWrapper, self).get_config() + return dict(list(base_config.items()) + list(config.items())) + + +@keras_export('keras.losses.MeanSquaredError') +class MeanSquaredError(LossFunctionWrapper): + """Computes the mean of squares of errors between labels and predictions. + + `loss = square(y_true - y_pred)` + + Usage: + + ```python + mse = tf.keras.losses.MeanSquaredError() + loss = mse([0., 0., 1., 1.], [1., 1., 1., 0.]) + print('Loss: ', loss.numpy()) # Loss: 0.75 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.MeanSquaredError()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='mean_squared_error'): + super(MeanSquaredError, self).__init__( + mean_squared_error, name=name, reduction=reduction) + + +@keras_export('keras.losses.MeanAbsoluteError') +class MeanAbsoluteError(LossFunctionWrapper): + """Computes the mean of absolute difference between labels and predictions. + + `loss = abs(y_true - y_pred)` + + Usage: + + ```python + mae = tf.keras.losses.MeanAbsoluteError() + loss = mae([0., 0., 1., 1.], [1., 1., 1., 0.]) + print('Loss: ', loss.numpy()) # Loss: 0.75 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='mean_absolute_error'): + super(MeanAbsoluteError, self).__init__( + mean_absolute_error, name=name, reduction=reduction) + + +@keras_export('keras.losses.MeanAbsolutePercentageError') +class MeanAbsolutePercentageError(LossFunctionWrapper): + """Computes the mean absolute percentage error between `y_true` and `y_pred`. 
+ + `loss = 100 * abs(y_true - y_pred) / y_true` + + Usage: + + ```python + mape = tf.keras.losses.MeanAbsolutePercentageError() + loss = mape([0., 0., 1., 1.], [1., 1., 1., 0.]) + print('Loss: ', loss.numpy()) # Loss: 5e+08 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='mean_absolute_percentage_error'): + super(MeanAbsolutePercentageError, self).__init__( + mean_absolute_percentage_error, name=name, reduction=reduction) + + +@keras_export('keras.losses.MeanSquaredLogarithmicError') +class MeanSquaredLogarithmicError(LossFunctionWrapper): + """Computes the mean squared logarithmic error between `y_true` and `y_pred`. + + `loss = square(log(y_true) - log(y_pred))` + + Usage: + + ```python + msle = tf.keras.losses.MeanSquaredLogarithmicError() + loss = msle([0., 0., 1., 1.], [1., 1., 1., 0.]) + print('Loss: ', loss.numpy()) # Loss: 0.36034 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='mean_squared_logarithmic_error'): + super(MeanSquaredLogarithmicError, self).__init__( + mean_squared_logarithmic_error, name=name, reduction=reduction) + + +@keras_export('keras.losses.BinaryCrossentropy') +class BinaryCrossentropy(LossFunctionWrapper): + """Computes the cross-entropy loss between true labels and predicted labels. + + Use this cross-entropy loss when there are only two label classes (assumed to + be 0 and 1). For each example, there should be a single floating-point value + per prediction. + + In the snippet below, each of the four examples has only a single + floating-pointing value, and both `y_pred` and `y_true` have the shape + `[batch_size]`. + + Usage: + + ```python + bce = tf.keras.losses.BinaryCrossentropy() + loss = bce([0., 0., 1., 1.], [1., 1., 1., 0.]) + print('Loss: ', loss.numpy()) # Loss: 11.522857 + ``` + + Usage with the `tf.keras` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy()) + ``` + + Args: + from_logits: Whether to interpret `y_pred` as a tensor of + [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume + that `y_pred` contains probabilities (i.e., values in [0, 1]). + Note: Using from_logits=True may be more numerically stable. + label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we + compute the loss between the predicted labels and a smoothed version of + the true labels, where the smoothing squeezes the labels towards 0.5. + Larger values of `label_smoothing` correspond to heavier smoothing. + reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. + Default value is `AUTO`. `AUTO` indicates that the reduction option will + be determined by the usage context. For almost all cases this defaults to + `SUM_OVER_BATCH_SIZE`. + When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: (Optional) Name for the op. 
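+
+  Example with `from_logits=True` (an illustrative sketch; the logit values
+  below are hypothetical and chosen only to show that no sigmoid is applied
+  beforehand):
+
+  ```python
+  bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
+  loss = bce([0., 0., 1., 1.], [-8., -4., 4., 8.])  # raw logits, not probabilities
+  # loss is small here because the logits already separate the two classes well
+  ```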
+ """ + + def __init__(self, + from_logits=False, + label_smoothing=0, + reduction=losses_utils.ReductionV2.AUTO, + name='binary_crossentropy'): + super(BinaryCrossentropy, self).__init__( + binary_crossentropy, + name=name, + reduction=reduction, + from_logits=from_logits, + label_smoothing=label_smoothing) + self.from_logits = from_logits + + +@keras_export('keras.losses.CategoricalCrossentropy') +class CategoricalCrossentropy(LossFunctionWrapper): + """Computes the crossentropy loss between the labels and predictions. + + Use this crossentropy loss function when there are two or more label classes. + We expect labels to be provided in a `one_hot` representation. If you want to + provide labels as integers, please use `SparseCategoricalCrossentropy` loss. + There should be `# classes` floating point values per feature. + + In the snippet below, there is `# classes` floating pointing values per + example. The shape of both `y_pred` and `y_true` are + `[batch_size, num_classes]`. + + Usage: + + ```python + cce = tf.keras.losses.CategoricalCrossentropy() + loss = cce( + [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], + [[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]]) + print('Loss: ', loss.numpy()) # Loss: 0.0945 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy()) + ``` + + Args: + from_logits: Whether `y_pred` is expected to be a logits tensor. By default, + we assume that `y_pred` encodes a probability distribution. + Note: Using from_logits=True may be more numerically stable. + label_smoothing: Float in [0, 1]. When > 0, label values are smoothed, + meaning the confidence on label values are relaxed. e.g. + `label_smoothing=0.2` means that we will use a value of `0.1` for label + `0` and `0.9` for label `1`" + reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. + Default value is `AUTO`. `AUTO` indicates that the reduction option will + be determined by the usage context. For almost all cases this defaults to + `SUM_OVER_BATCH_SIZE`. + When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: Optional name for the op. + """ + + def __init__(self, + from_logits=False, + label_smoothing=0, + reduction=losses_utils.ReductionV2.AUTO, + name='categorical_crossentropy'): + super(CategoricalCrossentropy, self).__init__( + categorical_crossentropy, + name=name, + reduction=reduction, + from_logits=from_logits, + label_smoothing=label_smoothing) + + +@keras_export('keras.losses.SparseCategoricalCrossentropy') +class SparseCategoricalCrossentropy(LossFunctionWrapper): + """Computes the crossentropy loss between the labels and predictions. + + Use this crossentropy loss function when there are two or more label classes. + We expect labels to be provided as integers. If you want to provide labels + using `one-hot` representation, please use `CategoricalCrossentropy` loss. + There should be `# classes` floating point values per feature for `y_pred` + and a single floating point value per feature for `y_true`. + + In the snippet below, there is a single floating point value per example for + `y_true` and `# classes` floating pointing values per example for `y_pred`. 
+ The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is + `[batch_size, num_classes]`. + + Usage: + + ```python + cce = tf.keras.losses.SparseCategoricalCrossentropy() + loss = cce( + [0, 1, 2], + [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]]) + print('Loss: ', loss.numpy()) # Loss: 0.3239 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy()) + ``` + + Args: + from_logits: Whether `y_pred` is expected to be a logits tensor. By default, + we assume that `y_pred` encodes a probability distribution. + Note: Using from_logits=True may be more numerically stable. + reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. + Default value is `AUTO`. `AUTO` indicates that the reduction option will + be determined by the usage context. For almost all cases this defaults to + `SUM_OVER_BATCH_SIZE`. + When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: Optional name for the op. + """ + + def __init__(self, + from_logits=False, + reduction=losses_utils.ReductionV2.AUTO, + name='sparse_categorical_crossentropy'): + super(SparseCategoricalCrossentropy, self).__init__( + sparse_categorical_crossentropy, + name=name, + reduction=reduction, + from_logits=from_logits) + + +@keras_export('keras.losses.Hinge') +class Hinge(LossFunctionWrapper): + """Computes the hinge loss between `y_true` and `y_pred`. + + `loss = maximum(1 - y_true * y_pred, 0)` + + `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are + provided we will convert them to -1 or 1. + + Usage: + + ```python + h = tf.keras.losses.Hinge() + loss = h([-1., 1., 1.], [0.6, -0.7, -0.5]) + + # loss = max(0, 1 - y_true * y_pred) = [1.6 + 1.7 + 1.5] / 3 + + print('Loss: ', loss.numpy()) # Loss: 1.6 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.Hinge()) + ``` + """ + + def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'): + super(Hinge, self).__init__(hinge, name=name, reduction=reduction) + + +@keras_export('keras.losses.SquaredHinge') +class SquaredHinge(LossFunctionWrapper): + """Computes the squared hinge loss between `y_true` and `y_pred`. + + `loss = square(maximum(1 - y_true * y_pred, 0))` + + `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are + provided we will convert them to -1 or 1. + + Usage: + + ```python + sh = tf.keras.losses.SquaredHinge() + loss = sh([-1., 1., 1.], [0.6, -0.7, -0.5]) + + # loss = (max(0, 1 - y_true * y_pred))^2 = [1.6^2 + 1.7^2 + 1.5^2] / 3 + + print('Loss: ', loss.numpy()) # Loss: 2.566666 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.SquaredHinge()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='squared_hinge'): + super(SquaredHinge, self).__init__( + squared_hinge, name=name, reduction=reduction) + + +@keras_export('keras.losses.CategoricalHinge') +class CategoricalHinge(LossFunctionWrapper): + """Computes the categorical hinge loss between `y_true` and `y_pred`. 
+ + `loss = maximum(neg - pos + 1, 0)` + where `neg = sum(y_true * y_pred)` and `pos = maximum(1 - y_true)` + + Usage: + + ```python + ch = tf.keras.losses.CategoricalHinge() + loss = ch([0., 1., 1.], [1., 0., 1.]) + print('Loss: ', loss.numpy()) # Loss: 1.0 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.CategoricalHinge()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='categorical_hinge'): + super(CategoricalHinge, self).__init__( + categorical_hinge, name=name, reduction=reduction) + + +@keras_export('keras.losses.Poisson') +class Poisson(LossFunctionWrapper): + """Computes the Poisson loss between `y_true` and `y_pred`. + + `loss = y_pred - y_true * log(y_pred)` + + Usage: + + ```python + p = tf.keras.losses.Poisson() + loss = p([1., 9., 2.], [4., 8., 12.]) + print('Loss: ', loss.numpy()) # Loss: -0.35702705 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.Poisson()) + ``` + """ + + def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'): + super(Poisson, self).__init__(poisson, name=name, reduction=reduction) + + +@keras_export('keras.losses.LogCosh') +class LogCosh(LossFunctionWrapper): + """Computes the logarithm of the hyperbolic cosine of the prediction error. + + `logcosh = log((exp(x) + exp(-x))/2)`, + where x is the error `y_pred - y_true`. + + Usage: + + ```python + l = tf.keras.losses.LogCosh() + loss = l([0., 1., 1.], [1., 0., 1.]) + print('Loss: ', loss.numpy()) # Loss: 0.289 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.LogCosh()) + ``` + """ + + def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'): + super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction) + + +@keras_export('keras.losses.KLDivergence') +class KLDivergence(LossFunctionWrapper): + """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. + + `loss = y_true * log(y_true / y_pred)` + + See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence + + Usage: + + ```python + k = tf.keras.losses.KLDivergence() + loss = k([.4, .9, .2], [.5, .8, .12]) + print('Loss: ', loss.numpy()) # Loss: 0.11891246 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.KLDivergence()) + ``` + """ + + def __init__(self, + reduction=losses_utils.ReductionV2.AUTO, + name='kullback_leibler_divergence'): + super(KLDivergence, self).__init__( + kullback_leibler_divergence, name=name, reduction=reduction) + + +@keras_export('keras.losses.Huber') +class Huber(LossFunctionWrapper): + """Computes the Huber loss between `y_true` and `y_pred`. + + For each value x in `error = y_true - y_pred`: + + ``` + loss = 0.5 * x^2 if |x| <= d + loss = 0.5 * d^2 + d * (|x| - d) if |x| > d + ``` + where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss + + Usage: + + ```python + l = tf.keras.losses.Huber() + loss = l([0., 1., 1.], [1., 0., 1.]) + print('Loss: ', loss.numpy()) # Loss: 0.333 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.Huber()) + ``` + + Args: + delta: A float, the point where the Huber loss function changes from a + quadratic to linear. 
+ reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. + Default value is `AUTO`. `AUTO` indicates that the reduction option will + be determined by the usage context. For almost all cases this defaults to + `SUM_OVER_BATCH_SIZE`. + When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: Optional name for the op. + """ + + def __init__(self, + delta=1.0, + reduction=losses_utils.ReductionV2.AUTO, + name='huber_loss'): + super(Huber, self).__init__( + huber_loss, name=name, reduction=reduction, delta=delta) + + +@keras_export('keras.metrics.mean_squared_error', + 'keras.metrics.mse', + 'keras.metrics.MSE', + 'keras.losses.mean_squared_error', + 'keras.losses.mse', + 'keras.losses.MSE') +def mean_squared_error(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1) + + +@keras_export('keras.metrics.mean_absolute_error', + 'keras.metrics.mae', + 'keras.metrics.MAE', + 'keras.losses.mean_absolute_error', + 'keras.losses.mae', + 'keras.losses.MAE') +def mean_absolute_error(y_true, y_pred): + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + return K.mean(math_ops.abs(y_pred - y_true), axis=-1) + + +@keras_export('keras.metrics.mean_absolute_percentage_error', + 'keras.metrics.mape', + 'keras.metrics.MAPE', + 'keras.losses.mean_absolute_percentage_error', + 'keras.losses.mape', + 'keras.losses.MAPE') +def mean_absolute_percentage_error(y_true, y_pred): # pylint: disable=missing-docstring + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + diff = math_ops.abs( + (y_true - y_pred) / K.clip(math_ops.abs(y_true), K.epsilon(), None)) + return 100. * K.mean(diff, axis=-1) + + +@keras_export('keras.metrics.mean_squared_logarithmic_error', + 'keras.metrics.msle', + 'keras.metrics.MSLE', + 'keras.losses.mean_squared_logarithmic_error', + 'keras.losses.msle', + 'keras.losses.MSLE') +def mean_squared_logarithmic_error(y_true, y_pred): # pylint: disable=missing-docstring + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + first_log = math_ops.log(K.clip(y_pred, K.epsilon(), None) + 1.) + second_log = math_ops.log(K.clip(y_true, K.epsilon(), None) + 1.) + return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1) + + +def _maybe_convert_labels(y_true): + """Converts binary labels into -1/1.""" + are_zeros = math_ops.equal(y_true, 0) + are_ones = math_ops.equal(y_true, 1) + is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones)) + + def _convert_binary_labels(): + # Convert the binary labels to -1 or 1. + return 2. * y_true - 1. + + updated_y_true = smart_cond.smart_cond(is_binary, + _convert_binary_labels, lambda: y_true) + return updated_y_true + + +@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge') +def squared_hinge(y_true, y_pred): + """Computes the squared hinge loss between `y_true` and `y_pred`. + + Args: + y_true: The ground truth values. `y_true` values are expected to be -1 or 1. + If binary (0 or 1) labels are provided we will convert them to -1 or 1. + y_pred: The predicted values. + + Returns: + Tensor with one scalar loss entry per sample. 
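+
+  Usage (an illustrative sketch mirroring the `SquaredHinge` class example
+  above; the values are hypothetical):
+
+  ```python
+  loss = tf.keras.losses.squared_hinge([-1., 1., 1.], [0.6, -0.7, -0.5])
+  # mean([1.6**2, 1.7**2, 1.5**2]) ~= 2.5667
+  ```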
+ """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + y_true = _maybe_convert_labels(y_true) + return K.mean( + math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1) + + +@keras_export('keras.metrics.hinge', 'keras.losses.hinge') +def hinge(y_true, y_pred): + """Computes the hinge loss between `y_true` and `y_pred`. + + Args: + y_true: The ground truth values. `y_true` values are expected to be -1 or 1. + If binary (0 or 1) labels are provided they will be converted to -1 or 1. + y_pred: The predicted values. + + Returns: + Tensor with one scalar loss entry per sample. + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + y_true = _maybe_convert_labels(y_true) + return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1) + + +@keras_export('keras.losses.categorical_hinge') +def categorical_hinge(y_true, y_pred): + """Computes the categorical hinge loss between `y_true` and `y_pred`. + + Args: + y_true: The ground truth values. `y_true` values are expected to be -1 or 1. + If binary (0 or 1) labels are provided they will be converted to -1 or 1. + y_pred: The predicted values. + + Returns: + A tensor. + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + pos = math_ops.reduce_sum(y_true * y_pred, axis=-1) + neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1) + return math_ops.maximum(0., neg - pos + 1.) + + +def huber_loss(y_true, y_pred, delta=1.0): + """Computes Huber loss value. + + For each value x in `error = y_true - y_pred`: + + ``` + loss = 0.5 * x^2 if |x| <= d + loss = 0.5 * d^2 + d * (|x| - d) if |x| > d + ``` + where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss + + Args: + y_true: tensor of true targets. + y_pred: tensor of predicted targets. + delta: A float, the point where the Huber loss function changes from a + quadratic to linear. + + Returns: + Tensor with one scalar loss entry per sample. + """ + y_pred = math_ops.cast(y_pred, dtype=K.floatx()) + y_true = math_ops.cast(y_true, dtype=K.floatx()) + error = math_ops.subtract(y_pred, y_true) + abs_error = math_ops.abs(error) + quadratic = math_ops.minimum(abs_error, delta) + linear = math_ops.subtract(abs_error, quadratic) + return math_ops.add( + math_ops.multiply( + ops.convert_to_tensor(0.5, dtype=quadratic.dtype), + math_ops.multiply(quadratic, quadratic)), + math_ops.multiply(delta, linear)) + + +@keras_export('keras.losses.logcosh') +def logcosh(y_true, y_pred): + """Logarithm of the hyperbolic cosine of the prediction error. + + `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and + to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly + like the mean squared error, but will not be so strongly affected by the + occasional wildly incorrect prediction. + + Arguments: + y_true: tensor of true targets. + y_pred: tensor of predicted targets. + + Returns: + Tensor with one scalar loss entry per sample. + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + + def _logcosh(x): + return x + nn.softplus(-2. * x) - math_ops.log(2.) + + return K.mean(_logcosh(y_pred - y_true), axis=-1) + + +@keras_export('keras.metrics.categorical_crossentropy', + 'keras.losses.categorical_crossentropy') +def categorical_crossentropy(y_true, + y_pred, + from_logits=False, + label_smoothing=0): + """Computes the categorical crossentropy loss. + + Args: + y_true: tensor of true targets. 
+ y_pred: tensor of predicted targets. + from_logits: Whether `y_pred` is expected to be a logits tensor. By default, + we assume that `y_pred` encodes a probability distribution. + label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. + + Returns: + Categorical crossentropy loss value. + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx()) + + def _smooth_labels(): + num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype) + return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes) + + y_true = smart_cond.smart_cond(label_smoothing, + _smooth_labels, lambda: y_true) + return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits) + + +@keras_export('keras.metrics.sparse_categorical_crossentropy', + 'keras.losses.sparse_categorical_crossentropy') +def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1): + return my_backend.sparse_categorical_crossentropy( + y_true, y_pred, from_logits=from_logits, axis=axis) + + +@keras_export('keras.metrics.binary_crossentropy', + 'keras.losses.binary_crossentropy') +def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0): # pylint: disable=missing-docstring + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx()) + + def _smooth_labels(): + return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing + + y_true = smart_cond.smart_cond(label_smoothing, + _smooth_labels, lambda: y_true) + return K.mean( + K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1) + + +@keras_export('keras.metrics.kullback_leibler_divergence', + 'keras.metrics.kld', + 'keras.metrics.KLD', + 'keras.losses.kullback_leibler_divergence', + 'keras.losses.kld', + 'keras.losses.KLD') +def kullback_leibler_divergence(y_true, y_pred): + """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`. + + `loss = y_true * log(y_true / y_pred)` + + See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence + + Usage: + + ```python + loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12]) + print('Loss: ', loss.numpy()) # Loss: 0.11891246 + ``` + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + + Returns: + A `Tensor` with loss. + + Raises: + TypeError: If `y_true` cannot be cast to the `y_pred.dtype`. + + """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + y_true = K.clip(y_true, K.epsilon(), 1) + y_pred = K.clip(y_pred, K.epsilon(), 1) + return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1) + + +@keras_export('keras.metrics.poisson', 'keras.losses.poisson') +def poisson(y_true, y_pred): + """Computes the Poisson loss between y_true and y_pred. + + The Poisson loss is the mean of the elements of the `Tensor` + `y_pred - y_true * log(y_pred)`. + + Usage: + + ```python + loss = tf.keras.losses.poisson([1.4, 9.3, 2.2], [4.3, 8.2, 12.2]) + print('Loss: ', loss.numpy()) # Loss: -0.8045559 + ``` + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + + Returns: + A `Tensor` with the mean Poisson loss. + + Raises: + InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes. 
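+
+  A further illustrative sketch (hypothetical values, added to show that the
+  internal `K.epsilon()` term keeps the `log` finite when `y_pred` contains
+  zeros):
+
+  ```python
+  loss = tf.keras.losses.poisson([0., 1.], [0., 1.])
+  # mean([0. - 0. * log(eps), 1. - 1. * log(1. + eps)]) ~= 0.0
+  ```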
+ """ + y_pred = ops.convert_to_tensor(y_pred) + y_true = math_ops.cast(y_true, y_pred.dtype) + return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1) + + +@keras_export( + 'keras.losses.cosine_similarity', + v1=[ + 'keras.metrics.cosine_proximity', + 'keras.metrics.cosine', + 'keras.losses.cosine_proximity', + 'keras.losses.cosine', + 'keras.losses.cosine_similarity', + ]) +def cosine_similarity(y_true, y_pred, axis=-1): + """Computes the cosine similarity between labels and predictions. + + Note that it is a negative quantity between -1 and 0, where 0 indicates + orthogonality and values closer to -1 indicate greater similarity. This makes + it usable as a loss function in a setting where you try to maximize the + proximity between predictions and targets. + + `loss = -sum(y_true * y_pred)` + + Args: + y_true: Tensor of true targets. + y_pred: Tensor of predicted targets. + axis: Axis along which to determine similarity. + + Returns: + Cosine similarity tensor. + """ + y_true = nn.l2_normalize(y_true, axis=axis) + y_pred = nn.l2_normalize(y_pred, axis=axis) + return -math_ops.reduce_sum(y_true * y_pred, axis=axis) + + +@keras_export('keras.losses.CosineSimilarity') +class CosineSimilarity(LossFunctionWrapper): + """Computes the cosine similarity between `y_true` and `y_pred`. + + Usage: + + ```python + cosine_loss = tf.keras.losses.CosineSimilarity(axis=1) + loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]]) + # l2_norm(y_true) = [[0., 1.], [1./1.414], 1./1.414]]] + # l2_norm(y_pred) = [[1., 0.], [1./1.414], 1./1.414]]] + # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]] + # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)) + = ((0. + 0.) + (0.5 + 0.5)) / 2 + + print('Loss: ', loss.numpy()) # Loss: 0.5 + ``` + + Usage with the `compile` API: + + ```python + model = tf.keras.Model(inputs, outputs) + model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1)) + ``` + + Args: + axis: (Optional) Defaults to -1. The dimension along which the cosine + similarity is computed. + reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss. + Default value is `AUTO`. `AUTO` indicates that the reduction option will + be determined by the usage context. For almost all cases this defaults to + `SUM_OVER_BATCH_SIZE`. + When used with `tf.distribute.Strategy`, outside of built-in training + loops such as `tf.keras` `compile` and `fit`, using `AUTO` or + `SUM_OVER_BATCH_SIZE` will raise an error. Please see + https://www.tensorflow.org/alpha/tutorials/distribute/training_loops + for more details on this. + name: Optional name for the op. + """ + + def __init__(self, + axis=-1, + reduction=losses_utils.ReductionV2.AUTO, + name='cosine_similarity'): + super(CosineSimilarity, self).__init__( + cosine_similarity, reduction=reduction, name=name, axis=axis) + + +# Aliases. 
+
+mse = MSE = mean_squared_error
+mae = MAE = mean_absolute_error
+mape = MAPE = mean_absolute_percentage_error
+msle = MSLE = mean_squared_logarithmic_error
+kld = KLD = kullback_leibler_divergence
+
+
+def is_categorical_crossentropy(loss):
+  result = ((isinstance(loss, CategoricalCrossentropy) or
+             (isinstance(loss, LossFunctionWrapper) and
+              loss.fn == categorical_crossentropy) or
+             (hasattr(loss, '__name__') and
+              loss.__name__ == 'categorical_crossentropy') or
+             (loss == 'categorical_crossentropy')))
+  return result
+
+
+@keras_export('keras.losses.serialize')
+def serialize(loss):
+  return serialize_keras_object(loss)
+
+
+@keras_export('keras.losses.deserialize')
+def deserialize(name, custom_objects=None):
+  return deserialize_keras_object(
+      name,
+      module_objects=globals(),
+      custom_objects=custom_objects,
+      printable_module_name='loss function')
+
+
+@keras_export('keras.losses.get')
+def get(identifier):
+  if identifier is None:
+    return None
+  if isinstance(identifier, six.string_types):
+    identifier = str(identifier)
+    return deserialize(identifier)
+  if isinstance(identifier, dict):
+    return deserialize(identifier)
+  elif callable(identifier):
+    return identifier
+  else:
+    raise ValueError('Could not interpret '
+                     'loss function identifier:', identifier)
+
+
+LABEL_DTYPES_FOR_LOSSES = {
+    losses_impl.sparse_softmax_cross_entropy: 'int32',
+    sparse_categorical_crossentropy: 'int32'
+}
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_full_1p_static.sh b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_full_1p_static.sh
new file mode 100644
index 000000000..91507abd5
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_full_1p_static.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#Set the default log level; no modification needed
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+#Basic parameters; review and adjust for your model
+#Batch Size
+batch_size=256
+#Network name, same as the directory name
+Network="involution_ID2515_for_TensorFlow2.X"
+#Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+#Training epochs (optional)
+train_epochs=200
+#Training steps
+#train_steps=50000
+#Learning rate
+#learning_rate=1e-5
+#Network type: convolution or involution
+network="convolution"
+#Parameter configuration
+data_path=""
+
+############Debug/monitoring parameters##############
+precision_mode="allow_mix_precision"
+#Maintenance parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump #here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=True
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############Debug/monitoring parameters##############
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_full_1p_static.sh"
+   exit 1
+fi
+
+for para in $*
+do
+   if [[ $para == --data_path* ]];then
+      data_path=`echo ${para#*=}`
+   fi
+done
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+##############Run training##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 involution.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --Drop_Reminder=True \
+    --batch_size=$batch_size \
+    --save_h5=False \
+    --network=$network \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+#Print results; no modification needed
+echo "------------------ Final result ------------------"
+#Output performance FPS; review and adjust for your model
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+#Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+#Output training accuracy; review and adjust for your model
+train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $15}'`
+#Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#Accuracy monitoring result summary
+#Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+##Collect performance data; no modification needed
+#Throughput
+ActualFPS=${FPS}
+#Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+#Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review for your model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#Loss value of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..ce3d5853f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#Set the default log level; no modification needed
+export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+#Basic parameters; review and adjust for your model
+#Batch Size
+batch_size=1024
+#Network name, same as the directory name
+Network="involution_ID2515_for_TensorFlow2.X"
+#Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+#Training epochs (optional)
+train_epochs=20
+#Training steps
+#train_steps=50000
+#Learning rate
+#learning_rate=1e-5
+#Network type: convolution or involution
+network="convolution"
+#Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage: ./train_performance_1p.sh"
+   exit 1
+fi
+
+for para in $*
+do
+   if [[ $para == --data_path* ]];then
+      data_path=`echo ${para#*=}`
+   fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+##############Run training##########
+cd $cur_path
+
+#Parameter modification
+#sed -i "s|../data/tfrecord|${data_path}/data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = ROOT_PATH + '/|PRETRAINED_CKPT = '${cur_path}/|g" ${cur_path}/libs/configs/cfgs.py
+
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/*
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 involution.py --data_path=$data_path \
+         --batch_size=$batch_size \
+         --epochs=$train_epochs \
+         --Drop_Reminder=False \
+         --save_h5=False \
+         --network=$network > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2etime=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2etime"
+
+#Restore parameters
+#sed -i "s|${data_path}/data/tfrecord|../data/tfrecord|g" ${cur_path}/data/io/read_tfrecord.py
+#sed -i "s|PRETRAINED_CKPT = '${cur_path}/|PRETRAINED_CKPT = ROOT_PATH + '/|g" ${cur_path}/libs/configs/cfgs.py
+
+#Print results; no modification needed
+echo "------------------ Final result ------------------"
+#Output performance FPS; review and adjust for your model
+#Since loss and performance values are not continuous, only the last loss and performance value of each epoch are taken
+#StepTime=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'ETA' | awk '{print $5}' | tr -d ms/step | awk '{sum+=$1} END {print sum/NR}'`
+#FPS calculation
+#FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${StepTime}'}'`
+#Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+#Output training accuracy; review and adjust for your model
+#train_accuracy=`grep accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'ETA' | awk '{print $11}' | awk '{sum+=$1} END {print sum/NR}'`
+#Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2etime"
+
+###The fields below are used for smoke-test monitoring
+BatchSize=${batch_size}
+#Device type, obtained automatically
+DeviceType=`uname -m`
+#Case name, generated automatically
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##Collect performance data; no modification needed
+#Throughput
+#ActualFPS=${FPS}
+#Training time per iteration
+#TrainingTime=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'ETA' | awk '{print $4}' | tr -d s | awk '{sum+=$1} END {print sum}'`
+
+#Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review for your model
+grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -Eo " loss: [0-9]*\.[0-9]*"| awk '{print $2}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#Loss value of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+##Collect error information
+#System error message
+ModelStatus="graph execution FAIL"
+error_msg="EZ3002"
+#Check whether the error message matches the historical state; no modification needed here
+error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel"
+Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+#DTS ticket number or issue link
+DTS_Number="DTS2021090622224"
+
+#Print key information into CaseName.log; no modification needed here
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_inv.sh b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_inv.sh
new file mode 100644
index 000000000..9fd389328
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_inv.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#Basic parameters; review and adjust for your model
+#Batch Size
+batch_size=256
+#Network name, same as the directory name
+Network="involution_ID2515_for_TensorFlow2.X"
+#Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+#Training epochs (optional)
+train_epochs=2
+#Training steps
+#train_steps=50000
+#Learning rate
+#learning_rate=1e-5
+#Network type: convolution or involution
+network="involution"
+#Parameter configuration
+data_path=""
+
+############Debug/monitoring parameters##############
+precision_mode="allow_mix_precision"
+#Maintenance parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump #here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=True
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############维测参数##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p_inv.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+##############执行训练##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 involution.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --Drop_Reminder=True \
+    --batch_size=$batch_size \
+    --save_h5=False \
+    --network=$network \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2etime=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2etime"
+
+#结果打印,不需要修改
+echo "------------------ Final result ------------------"
+echo "Final Training Duration sec : $e2etime"
+
+###下面字段用于冒烟看护
+BatchSize=${batch_size}
+#设备类型,自动获取
+DeviceType=`uname -m`
+#用例名称,自动获取
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##获取错误信息
+#系统错误信息
+ModelStatus="图执行FAIL"
+#判断错误信息是否和历史状态一致,此处无需修改
+error_msg="E19014: Value \[input x shape\] for Op \[inv_model/inv_2/reshape_1/Shape\] is invalid. Reason: contains negative or zero dimension."
+Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#DTS单号或者issue链接 +DTS_Number="DTS2021090622224" + +#关键信息打印到CaseName.log中,此处无需修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..c19eb17a8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/involution_ID2515_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,171 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=512 +#网络名称,同目录名称 +Network="involution_ID2515_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=5 +#训练step +#train_steps=50000 +#学习率 +#learning_rate=1e-5 +#网络类型 convolution or involution +network="convolution" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=True +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + 
elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + fi +done + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 involution.py --data_path=$data_path \ + --epochs=$train_epochs \ + --Drop_Reminder=True \ + --batch_size=$batch_size \ + --save_h5=False \ + --network=$network \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --auto_tune=${auto_tune} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$15}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = 
${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From 6b2bc847158fce320cf9c51e93c4255824d65aa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:17:54 +0000 Subject: [PATCH 40/54] =?UTF-8?q?keypoint=5Fdetection=5FID2516=5Ffor=5FTen?= =?UTF-8?q?sorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 21 + .../README.md | 212 ++++ .../README_BAK.md | 533 ++++++++++ .../keypoint_detection.py | 970 ++++++++++++++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../run_1p.sh | 2 + .../test/train_full_1p.sh | 166 +++ .../test/train_performance_1p.sh | 166 +++ .../test/train_performance_1p_static.sh | 167 +++ 10 files changed, 2240 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p_static.sh diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..51d555a15 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Ke YU + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..ca02720a2 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README.md @@ -0,0 +1,212 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) + +

基本信息

+ +**发布者(Publisher):Huawei** + +**应用领域(Application Domain):Object Detection** + +**版本(Version):1.1** + +**修改时间(Modified) :2022.04.11** + +**大小(Size):10.2M** + +**框架(Framework):TensorFlow_2.6.2** + +**模型格式(Model Format):ckpt** + +**精度(Precision):Mixed** + +**处理器(Processor):昇腾910** + +**应用级别(Categories):Research** + +**描述(Description):基于TensorFlow2.X框架的图像关键点检测网络训练代码** + + +

概述

+
+## 简述
+
+    keypoint_detection网络使用数据增强和迁移学习训练图像关键点检测器。关键点检测包括定位关键对象部分,例如,我们脸部的关键部位包括鼻尖、眉毛、眼角等。这些部分有助于以特征丰富的方式表示底层对象。关键点检测的应用包括姿势估计、人脸检测等。
+
+
+  - 参考论文:
+
+    skip
+
+  - 参考实现:
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/keypoint_detection.py
+
+
+  - 适配昇腾 AI 处理器的实现:
+    skip
+
+  - 通过Git获取对应commit_id的代码方法如下:
+    ```
+    git clone {repository_url}        # 克隆仓库的代码
+    cd {repository_name}              # 切换到模型的代码仓目录
+    git checkout {branch}             # 切换到对应分支
+    git reset --hard {commit_id}      # 代码设置到对应的commit_id
+    cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+    ```
+
+
+
+
+## 默认配置
+
+
+- 网络结构
+  - 在 ImageNet-1k 数据集上预训练的 MobileNetV2 作为主干网络
+  - 网络总参数2,354,256
+
+- 训练超参(单卡):
+  - Batch size: 64
+  - IMG_SIZE: 224
+  - NUM_KEYPOINTS: 24 * 2
+  - Train epoch: 5
+
+
+## 支持特性
+
+| 特性列表 | 是否支持 |
+|-------|------|
+| 分布式训练 | 否 |
+| 混合精度 | 是 |
+| 数据并行 | 否 |
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+相关代码示例。
+
+```
+    config_proto = tf.ConfigProto(allow_soft_placement=True)
+    custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+    custom_op.name = 'NpuOptimizer'
+    custom_op.parameter_map["use_off_line"].b = True
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+    config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+    session_config = npu_config_proto(config_proto=config_proto)
+```
+
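+上述示例为TF1.X会话配置方式。本仓库的TF2.X训练脚本实际通过`npu_device`全局选项开启混合精度,对应参考写法示意如下(具体以脚本中npu_config()的实现为准):
+
+```
+import npu_device
+
+# 设置混合精度模式
+npu_device.global_options().precision_mode = "allow_mix_precision"
+# 可选:配合混合精度黑白名单文件使用
+# npu_device.global_options().modify_mixlist = "ops_info.json"
+# 使能NPU
+npu_device.open().as_default()
+```
+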

训练环境准备

+
+- 硬件环境和运行环境准备请参见《[CANN软件安装指南](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update)》
+- 运行以下命令安装依赖。
+```
+pip3 install -r requirements.txt
+```
+说明:依赖配置文件requirements.txt位于模型的根目录。
+
+
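+安装完成后,可用如下Python片段自检依赖是否就绪(示意代码):
+
+```
+# 依赖自检:TensorFlow与npu_device均应可正常导入
+import tensorflow as tf
+import npu_device
+
+print("TensorFlow:", tf.__version__)  # 预期为2.6.x
+```
+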

快速上手

+
+## 数据集准备
+
+  模型训练使用StanfordExtra数据集,该数据集包含 12,000 张狗的图像以及对应的关键点和分割图,由斯坦福狗(Stanford Dogs)数据集发展而来。
+注释以单个 JSON 文件的形式随 StanfordExtra 数据集提供,需要填写[此表单](https://forms.gle/sRtbicgxsWvRtRmUA)才能获得访问权限。JSON 文件预计以 stanfordextra_v12.zip 的形式保存在本地。下载完成后,解压归档:
+```
+tar xf images.tar
+unzip -qq ~/stanfordextra_v12.zip
+```
+
+
+## 模型训练
+- 单击“立即下载”,并选择合适的下载方式下载源码包。
+- 开始训练。
+
+    1. 启动训练之前,首先要配置程序运行相关环境变量。
+
+       环境变量配置信息参见:
+
+       [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+    2. 单卡训练
+
+       2.1 设置单卡训练参数(脚本位于keypoint_detection_ID2516_for_TensorFlow2.X/test/train_full_1p.sh),示例如下。
+
+
+            ```
+            batch_size=64
+            #训练epoch
+            train_epochs=5
+
+            ```
+
+
+
+       2.2 单卡训练指令(keypoint_detection_ID2516_for_TensorFlow2.X/test)
+
+            ```
+            于终端中运行export ASCEND_DEVICE_ID=0 (0~7)以指定单卡训练时使用的卡
+            bash train_full_1p.sh --data_path=xx
+            数据集提供了一个元数据文件keypoint_definitions.csv,其中指定了有关关键点的附加信息,如颜色信息、动物姿势名称等,训练脚本会将其加载到pandas数据框中用于可视化。配置data_path时需指定到data这一层,例:--data_path=/home/data
+            ├─data
+            │  ├─Images
+            │  ├─keypoint_definitions.csv
+            │  ├─models
+            │  ├─StanfordExtra_V12
+
+
+            ```
+
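+训练启动前,可用如下Python片段检查--data_path目录结构是否齐全(示意代码,所列相对路径与keypoint_detection.py中的IMG_DIR/JSON/KEYPOINT_DEF保持一致):
+
+```
+import os
+
+data_path = "/home/data"  # 与--data_path保持一致
+expected = [
+    "Images",                                    # 图像目录
+    "StanfordExtra_V12/StanfordExtra_v12.json",  # 标注JSON
+    "keypoint_definitions.csv",                  # 关键点元数据文件
+]
+for rel in expected:
+    full = os.path.join(data_path, rel)
+    print(("OK   " if os.path.exists(full) else "MISS ") + full)
+```
+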

迁移学习指导

+ +- 数据集准备。 + + 1. 获取数据。 + 请参见“快速上手”中的数据集准备 + +- 模型训练 + + 请参考“快速上手”章节 + +

高级参考

+
+## 脚本和示例代码
+
+    ├── README.md                                 //说明文档
+    ├── requirements.txt                          //依赖
+    ├── modelzoo_level.txt                        //状态文件
+    ├── keypoint_detection.py                     //网络构建与训练脚本
+    ├── test
+    |    |—— train_full_1p.sh                     //单卡全量训练脚本
+    |    |—— train_performance_1p.sh              //单卡性能训练脚本
+    |    |—— train_performance_1p_static.sh       //单卡性能训练脚本(静态shape)
+
+
+## 脚本参数
+
+```
+batch_size               训练batch_size,默认64
+learning_rate            初始学习率
+epochs                   训练epoch数,默认5
+precision_mode           精度模式,默认allow_mix_precision
+over_dump                是否开启溢出检测,默认False
+data_dump_flag           是否dump训练数据,默认False
+data_dump_step           dump数据的步数,默认10
+profiling                是否开启profiling性能调试,默认False
+profiling_dump_path      profiling数据保存路径
+over_dump_path           溢出数据保存路径
+data_dump_path           dump数据保存路径
+use_mixlist              是否使用混合精度黑白名单,默认False
+fusion_off_flag          是否关闭算子融合,默认False
+mixlist_file             黑白名单文件名,默认ops_info.json
+fusion_off_file          融合开关配置文件名,默认fusion_switch.cfg
+auto_tune                是否开启auto_tune,默认False
+```
+
+## 训练过程
+
+通过“模型训练”中的训练指令启动单卡训练。
+将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。
diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..f08901a53
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,533 @@
+# Keypoint Detection with Transfer Learning
+
+**Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br>
+**Date created:** 2021/05/02
+**Last modified:** 2021/05/02
+**Description:** Training a keypoint detector with data augmentation and transfer learning. + + + [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/keypoint_detection.ipynb) [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/keypoint_detection.py) + + + +Keypoint detection consists of locating key object parts. For example, the key parts +of our faces include nose tips, eyebrows, eye corners, and so on. These parts help to +represent the underlying object in a feature-rich manner. Keypoint detection has +applications that include pose estimation, face detection, etc. + +In this example, we will build a keypoint detector using the +[StanfordExtra dataset](https://github.com/benjiebob/StanfordExtra), +using transfer learning. This example requires TensorFlow 2.4 or higher, +as well as [`imgaug`](https://imgaug.readthedocs.io/) library, +which can be installed using the following command: + + +```python +!pip install -q -U imgaug +``` + +--- +## Data collection + +The StanfordExtra dataset contains 12,000 images of dogs together with keypoints and +segmentation maps. It is developed from the [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/). +It can be downloaded with the command below: + + +```python +!wget -q http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar +``` + +Annotations are provided as a single JSON file in the StanfordExtra dataset and one needs +to fill [this form](https://forms.gle/sRtbicgxsWvRtRmUA) to get access to it. The +authors explicitly instruct users not to share the JSON file, and this example respects this wish: +you should obtain the JSON file yourself. + +The JSON file is expected to be locally available as `stanfordextra_v12.zip`. + +After the files are downloaded, we can extract the archives. + + +```python +!tar xf images.tar +!unzip -qq ~/stanfordextra_v12.zip +``` + +--- +## Imports + + +```python +from tensorflow.keras import layers +from tensorflow import keras +import tensorflow as tf + +from imgaug.augmentables.kps import KeypointsOnImage +from imgaug.augmentables.kps import Keypoint +import imgaug.augmenters as iaa + +from PIL import Image +from sklearn.model_selection import train_test_split +from matplotlib import pyplot as plt +import pandas as pd +import numpy as np +import json +import os +``` + +--- +## Define hyperparameters + + +```python +IMG_SIZE = 224 +BATCH_SIZE = 64 +EPOCHS = 5 +NUM_KEYPOINTS = 24 * 2 # 24 pairs each having x and y coordinates +``` + +--- +## Load data + +The authors also provide a metadata file that specifies additional information about the +keypoints, like color information, animal pose name, etc. We will load this file in a `pandas` +dataframe to extract information for visualization purposes. + + +```python +IMG_DIR = "Images" +JSON = "StanfordExtra_V12/StanfordExtra_v12.json" +KEYPOINT_DEF = ( + "https://github.com/benjiebob/StanfordExtra/raw/master/keypoint_definitions.csv" +) + +# Load the ground-truth annotations. +with open(JSON) as infile: + json_data = json.load(infile) + +# Set up a dictionary, mapping all the ground-truth information +# with respect to the path of the image. 
+json_dict = {i["img_path"]: i for i in json_data} +``` + +A single entry of `json_dict` looks like the following: + +``` +'n02085782-Japanese_spaniel/n02085782_2886.jpg': +{'img_bbox': [205, 20, 116, 201], + 'img_height': 272, + 'img_path': 'n02085782-Japanese_spaniel/n02085782_2886.jpg', + 'img_width': 350, + 'is_multiple_dogs': False, + 'joints': [[108.66666666666667, 252.0, 1], + [147.66666666666666, 229.0, 1], + [163.5, 208.5, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [54.0, 244.0, 1], + [77.33333333333333, 225.33333333333334, 1], + [79.0, 196.5, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [150.66666666666666, 86.66666666666667, 1], + [88.66666666666667, 73.0, 1], + [116.0, 106.33333333333333, 1], + [109.0, 123.33333333333333, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]], + 'seg': ...} +``` + +In this example, the keys we are interested in are: + +* `img_path` +* `joints` + +There are a total of 24 entries present inside `joints`. Each entry has 3 values: + +* x-coordinate +* y-coordinate +* visibility flag of the keypoints (1 indicates visibility and 0 indicates non-visibility) + +As we can see `joints` contain multiple `[0, 0, 0]` entries which denote that those +keypoints were not labeled. In this example, we will consider both non-visible as well as +unlabeled keypoints in order to allow mini-batch learning. + + +```python +# Load the metdata definition file and preview it. +keypoint_def = pd.read_csv(KEYPOINT_DEF) +keypoint_def.head() + +# Extract the colours and labels. +colours = keypoint_def["Hex colour"].values.tolist() +colours = ["#" + colour for colour in colours] +labels = keypoint_def["Name"].values.tolist() + +# Utility for reading an image and for getting its annotations. +def get_dog(name): + data = json_dict[name] + img_data = plt.imread(os.path.join(IMG_DIR, data["img_path"])) + # If the image is RGBA convert it to RGB. + if img_data.shape[-1] == 4: + img_data = img_data.astype(np.uint8) + img_data = Image.fromarray(img_data) + img_data = np.array(img_data.convert("RGB")) + data["img_data"] = img_data + + return data + +``` + +--- +## Visualize data + +Now, we write a utility function to visualize the images and their keypoints. + + +```python +# Parts of this code come from here: +# https://github.com/benjiebob/StanfordExtra/blob/master/demo.ipynb +def visualize_keypoints(images, keypoints): + fig, axes = plt.subplots(nrows=len(images), ncols=2, figsize=(16, 12)) + [ax.axis("off") for ax in np.ravel(axes)] + + for (ax_orig, ax_all), image, current_keypoint in zip(axes, images, keypoints): + ax_orig.imshow(image) + ax_all.imshow(image) + + # If the keypoints were formed by `imgaug` then the coordinates need + # to be iterated differently. + if isinstance(current_keypoint, KeypointsOnImage): + for idx, kp in enumerate(current_keypoint.keypoints): + ax_all.scatter( + [kp.x], [kp.y], c=colours[idx], marker="x", s=50, linewidths=5 + ) + else: + current_keypoint = np.array(current_keypoint) + # Since the last entry is the visibility flag, we discard it. + current_keypoint = current_keypoint[:, :2] + for idx, (x, y) in enumerate(current_keypoint): + ax_all.scatter([x], [y], c=colours[idx], marker="x", s=50, linewidths=5) + + plt.tight_layout(pad=2.0) + plt.show() + + +# Select four samples randomly for visualization. 
+samples = list(json_dict.keys()) +num_samples = 4 +selected_samples = np.random.choice(samples, num_samples, replace=False) + +images, keypoints = [], [] + +for sample in selected_samples: + data = get_dog(sample) + image = data["img_data"] + keypoint = data["joints"] + + images.append(image) + keypoints.append(keypoint) + +visualize_keypoints(images, keypoints) +``` + + + +![png](/img/examples/vision/keypoint_detection/keypoint_detection_18_0.png) + + + +The plots show that we have images of non-uniform sizes, which is expected in most +real-world scenarios. However, if we resize these images to have a uniform shape (for +instance (224 x 224)) their ground-truth annotations will also be affected. The same +applies if we apply any geometric transformation (horizontal flip, for e.g.) to an image. +Fortunately, `imgaug` provides utilities that can handle this issue. +In the next section, we will write a data generator inheriting the +[`keras.utils.Sequence`](https://keras.io/api/utils/python_utils/#sequence-class) class +that applies data augmentation on batches of data using `imgaug`. + +--- +## Prepare data generator + + +```python + +class KeyPointsDataset(keras.utils.Sequence): + def __init__(self, image_keys, aug, batch_size=BATCH_SIZE, train=True): + self.image_keys = image_keys + self.aug = aug + self.batch_size = batch_size + self.train = train + self.on_epoch_end() + + def __len__(self): + return len(self.image_keys) // self.batch_size + + def on_epoch_end(self): + self.indexes = np.arange(len(self.image_keys)) + if self.train: + np.random.shuffle(self.indexes) + + def __getitem__(self, index): + indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size] + image_keys_temp = [self.image_keys[k] for k in indexes] + (images, keypoints) = self.__data_generation(image_keys_temp) + + return (images, keypoints) + + def __data_generation(self, image_keys_temp): + batch_images = np.empty((self.batch_size, IMG_SIZE, IMG_SIZE, 3), dtype="int") + batch_keypoints = np.empty( + (self.batch_size, 1, 1, NUM_KEYPOINTS), dtype="float32" + ) + + for i, key in enumerate(image_keys_temp): + data = get_dog(key) + current_keypoint = np.array(data["joints"])[:, :2] + kps = [] + + # To apply our data augmentation pipeline, we first need to + # form Keypoint objects with the original coordinates. + for j in range(0, len(current_keypoint)): + kps.append(Keypoint(x=current_keypoint[j][0], y=current_keypoint[j][1])) + + # We then project the original image and its keypoint coordinates. + current_image = data["img_data"] + kps_obj = KeypointsOnImage(kps, shape=current_image.shape) + + # Apply the augmentation pipeline. + (new_image, new_kps_obj) = self.aug(image=current_image, keypoints=kps_obj) + batch_images[i,] = new_image + + # Parse the coordinates from the new keypoint object. + kp_temp = [] + for keypoint in new_kps_obj: + kp_temp.append(np.nan_to_num(keypoint.x)) + kp_temp.append(np.nan_to_num(keypoint.y)) + + # More on why this reshaping later. + batch_keypoints[i,] = np.array(kp_temp).reshape(1, 1, 24 * 2) + + # Scale the coordinates to [0, 1] range. + batch_keypoints = batch_keypoints / IMG_SIZE + + return (batch_images, batch_keypoints) + +``` + +To know more about how to operate with keypoints in `imgaug` check out +[this document](https://imgaug.readthedocs.io/en/latest/source/examples_keypoints.html). 
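+As a minimal, self-contained illustration of what `imgaug` does here, the following
+sketch resizes a dummy image and lets `imgaug` rescale a keypoint along with it:
+
+```python
+import numpy as np
+import imgaug.augmenters as iaa
+from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
+
+image = np.zeros((100, 200, 3), dtype=np.uint8)    # dummy 100x200 image
+kps = KeypointsOnImage([Keypoint(x=50, y=25)], shape=image.shape)
+
+aug = iaa.Resize(224, interpolation="linear")      # same resize as `test_aug`
+image_aug, kps_aug = aug(image=image, keypoints=kps)
+
+print(image_aug.shape)        # (224, 224, 3)
+print(kps_aug.keypoints[0])   # keypoint rescaled to the new resolution
+```
+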
+ +--- +## Define augmentation transforms + + +```python +train_aug = iaa.Sequential( + [ + iaa.Resize(IMG_SIZE, interpolation="linear"), + iaa.Fliplr(0.3), + # `Sometimes()` applies a function randomly to the inputs with + # a given probability (0.3, in this case). + iaa.Sometimes(0.3, iaa.Affine(rotate=10, scale=(0.5, 0.7))), + ] +) + +test_aug = iaa.Sequential([iaa.Resize(IMG_SIZE, interpolation="linear")]) +``` + +--- +## Create training and validation splits + + +```python +np.random.shuffle(samples) +train_keys, validation_keys = ( + samples[int(len(samples) * 0.15) :], + samples[: int(len(samples) * 0.15)], +) + +``` + +--- +## Data generator investigation + + +```python +train_dataset = KeyPointsDataset(train_keys, train_aug) +validation_dataset = KeyPointsDataset(validation_keys, test_aug, train=False) + +print(f"Total batches in training set: {len(train_dataset)}") +print(f"Total batches in validation set: {len(validation_dataset)}") + +sample_images, sample_keypoints = next(iter(train_dataset)) +assert sample_keypoints.max() == 1.0 +assert sample_keypoints.min() == 0.0 + +sample_keypoints = sample_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE +visualize_keypoints(sample_images[:4], sample_keypoints) +``` + +
+``` +Total batches in training set: 166 +Total batches in validation set: 29 + +``` +
+ +![png](/img/examples/vision/keypoint_detection/keypoint_detection_28_1.png) + + + +--- +## Model building + +The [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/) (on which +the StanfordExtra dataset is based) was built using the [ImageNet-1k dataset](http://image-net.org/). +So, it is likely that the models pretrained on the ImageNet-1k dataset would be useful +for this task. We will use a MobileNetV2 pre-trained on this dataset as a backbone to +extract meaningful features from the images and then pass those to a custom regression +head for predicting coordinates. + + +```python + +def get_model(): + # Load the pre-trained weights of MobileNetV2 and freeze the weights + backbone = keras.applications.MobileNetV2( + weights="imagenet", include_top=False, input_shape=(IMG_SIZE, IMG_SIZE, 3) + ) + backbone.trainable = False + + inputs = layers.Input((IMG_SIZE, IMG_SIZE, 3)) + x = keras.applications.mobilenet_v2.preprocess_input(inputs) + x = backbone(x) + x = layers.Dropout(0.3)(x) + x = layers.SeparableConv2D( + NUM_KEYPOINTS, kernel_size=5, strides=1, activation="relu" + )(x) + outputs = layers.SeparableConv2D( + NUM_KEYPOINTS, kernel_size=3, strides=1, activation="sigmoid" + )(x) + + return keras.Model(inputs, outputs, name="keypoint_detector") + +``` + +Our custom network is fully-convolutional which makes it more parameter-friendly than the +same version of the network having fully-connected dense layers. + + +```python +get_model().summary() +``` + +
+``` +Model: "keypoint_detector" +_________________________________________________________________ +Layer (type) Output Shape Param # +================================================================= +input_2 (InputLayer) [(None, 224, 224, 3)] 0 +_________________________________________________________________ +tf.math.truediv (TFOpLambda) (None, 224, 224, 3) 0 +_________________________________________________________________ +tf.math.subtract (TFOpLambda (None, 224, 224, 3) 0 +_________________________________________________________________ +mobilenetv2_1.00_224 (Functi (None, 7, 7, 1280) 2257984 +_________________________________________________________________ +dropout (Dropout) (None, 7, 7, 1280) 0 +_________________________________________________________________ +separable_conv2d (SeparableC (None, 3, 3, 48) 93488 +_________________________________________________________________ +separable_conv2d_1 (Separabl (None, 1, 1, 48) 2784 +================================================================= +Total params: 2,354,256 +Trainable params: 96,272 +Non-trainable params: 2,257,984 +_________________________________________________________________ + +``` +
+Notice the output shape of the network: `(None, 1, 1, 48)`. This is why we have reshaped +the coordinates as: `batch_keypoints[i, :] = np.array(kp_temp).reshape(1, 1, 24 * 2)`. + +--- +## Model compilation and training + +For this example, we will train the network only for five epochs. + + +```python +model = get_model() +model.compile(loss="mse", optimizer=keras.optimizers.Adam(1e-4)) +model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS) +``` + +
+``` +Epoch 1/5 +166/166 [==============================] - 85s 486ms/step - loss: 0.1087 - val_loss: 0.0950 +Epoch 2/5 +166/166 [==============================] - 78s 471ms/step - loss: 0.0830 - val_loss: 0.0778 +Epoch 3/5 +166/166 [==============================] - 78s 468ms/step - loss: 0.0778 - val_loss: 0.0739 +Epoch 4/5 +166/166 [==============================] - 78s 470ms/step - loss: 0.0753 - val_loss: 0.0711 +Epoch 5/5 +166/166 [==============================] - 78s 468ms/step - loss: 0.0735 - val_loss: 0.0692 + + + +``` +
+--- +## Make predictions and visualize them + + +```python +sample_val_images, sample_val_keypoints = next(iter(validation_dataset)) +sample_val_images = sample_val_images[:4] +sample_val_keypoints = sample_val_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE +predictions = model.predict(sample_val_images).reshape(-1, 24, 2) * IMG_SIZE + +# Ground-truth +visualize_keypoints(sample_val_images, sample_val_keypoints) + +# Predictions +visualize_keypoints(sample_val_images, predictions) +``` + + + +![png](/img/examples/vision/keypoint_detection/keypoint_detection_37_0.png) + + + + + +![png](/img/examples/vision/keypoint_detection/keypoint_detection_37_1.png) + + + +Predictions will likely improve with more training. + +--- +## Going further + +* Try using other augmentation transforms from `imgaug` to investigate how that changes +the results. +* Here, we transferred the features from the pre-trained network linearly that is we did +not [fine-tune](https://keras.io/guides/transfer_learning/) it. You are encouraged to fine-tune it on this task and see if that +improves the performance. You can also try different architectures and see how they +affect the final performance. diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py new file mode 100644 index 000000000..6f41f747e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py @@ -0,0 +1,970 @@ +""" +Title: Keypoint Detection with Transfer Learning +Author: [Sayak Paul](https://twitter.com/RisingSayak) +Date created: 2021/05/02 +Last modified: 2021/05/02 +Description: Training a keypoint detector with data augmentation and transfer learning. +""" +""" +Keypoint detection consists of locating key object parts. For example, the key parts +of our faces include nose tips, eyebrows, eye corners, and so on. These parts help to +represent the underlying object in a feature-rich manner. Keypoint detection has +applications that include pose estimation, face detection, etc. + +In this example, we will build a keypoint detector using the +[StanfordExtra dataset](https://github.com/benjiebob/StanfordExtra), +using transfer learning. This example requires TensorFlow 2.4 or higher, +as well as [`imgaug`](https://imgaug.readthedocs.io/) library, +which can be installed using the following command: +""" + +"""shell +pip install -q -U imgaug +""" + +""" +## Data collection +""" + +""" +The StanfordExtra dataset contains 12,000 images of dogs together with keypoints and +segmentation maps. It is developed from the [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/). +It can be downloaded with the command below: +""" + +"""shell +wget -q http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar +""" + +""" +Annotations are provided as a single JSON file in the StanfordExtra dataset and one needs +to fill [this form](https://forms.gle/sRtbicgxsWvRtRmUA) to get access to it. The +authors explicitly instruct users not to share the JSON file, and this example respects this wish: +you should obtain the JSON file yourself. + +The JSON file is expected to be locally available as `stanfordextra_v12.zip`. + +After the files are downloaded, we can extract the archives. 
+""" + +"""shell +tar xf images.tar +unzip -qq ~/stanfordextra_v12.zip +""" + +""" +## Imports +""" + +from tensorflow.keras import layers +from tensorflow import keras +import tensorflow as tf + +from imgaug.augmentables.kps import KeypointsOnImage +from imgaug.augmentables.kps import Keypoint +import imgaug.augmenters as iaa + +from PIL import Image +from sklearn.model_selection import train_test_split +from matplotlib import pyplot as plt +import pandas as pd +import numpy as np +import json +import os +import ast +import argparse +import npu_device + +# npu_device.open().as_default() + +def parse_args(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default='/root/keypoint_detection', help="""directory to data""") + parser.add_argument('--batch_size', default=64, type=int, help="""batch size for 1p""") + parser.add_argument('--epochs', default=5, type=int, help="""epochs""") + parser.add_argument('--static', default=0, type=int, help="""static""") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10",help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False') + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + 
profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= +npu_config() + +""" +## Define hyperparameters +""" + +IMG_SIZE = 224 +BATCH_SIZE = args.batch_size # 64 +EPOCHS = args.epochs # 5 +NUM_KEYPOINTS = 24 * 2 # 24 pairs each having x and y coordinates + +""" +## Load data + +The authors also provide a metadata file that specifies additional information about the +keypoints, like color information, animal pose name, etc. We will load this file in a `pandas` +dataframe to extract information for visualization purposes. +""" + +IMG_DIR = os.path.join(args.data_path, "Images") +JSON = os.path.join(args.data_path, "StanfordExtra_V12/StanfordExtra_v12.json") +KEYPOINT_DEF = ( + # "https://github.com/benjiebob/StanfordExtra/raw/master/keypoint_definitions.csv" + os.path.join(args.data_path, "keypoint_definitions.csv") +) + +# Load the ground-truth annotations. +with open(JSON) as infile: + json_data = json.load(infile) + +# Set up a dictionary, mapping all the ground-truth information +# with respect to the path of the image. +json_dict = {i["img_path"]: i for i in json_data} + + +""" +A single entry of `json_dict` looks like the following: + +``` +'n02085782-Japanese_spaniel/n02085782_2886.jpg': +{'img_bbox': [205, 20, 116, 201], + 'img_height': 272, + 'img_path': 'n02085782-Japanese_spaniel/n02085782_2886.jpg', + 'img_width': 350, + 'is_multiple_dogs': False, + 'joints': [[108.66666666666667, 252.0, 1], + [147.66666666666666, 229.0, 1], + [163.5, 208.5, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [54.0, 244.0, 1], + [77.33333333333333, 225.33333333333334, 1], + [79.0, 196.5, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [150.66666666666666, 86.66666666666667, 1], + [88.66666666666667, 73.0, 1], + [116.0, 106.33333333333333, 1], + [109.0, 123.33333333333333, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]], + 'seg': ...} +``` +""" + +""" +In this example, the keys we are interested in are: + +* `img_path` +* `joints` + +There are a total of 24 entries present inside `joints`. Each entry has 3 values: + +* x-coordinate +* y-coordinate +* visibility flag of the keypoints (1 indicates visibility and 0 indicates non-visibility) + +As we can see `joints` contain multiple `[0, 0, 0]` entries which denote that those +keypoints were not labeled. In this example, we will consider both non-visible as well as +unlabeled keypoints in order to allow mini-batch learning. +""" + +# Load the metdata definition file and preview it. +keypoint_def = pd.read_csv(KEYPOINT_DEF) +keypoint_def.head() + +# Extract the colours and labels. 
+colours = keypoint_def["Hex colour"].values.tolist() +colours = ["#" + colour for colour in colours] +labels = keypoint_def["Name"].values.tolist() + + +# Utility for reading an image and for getting its annotations. +def get_dog(name): + data = json_dict[name] + img_data = plt.imread(os.path.join(IMG_DIR, data["img_path"])) + # If the image is RGBA convert it to RGB. + if img_data.shape[-1] == 4: + img_data = img_data.astype(np.uint8) + img_data = Image.fromarray(img_data) + img_data = np.array(img_data.convert("RGB")) + data["img_data"] = img_data + + return data + + +""" +## Visualize data + +Now, we write a utility function to visualize the images and their keypoints. +""" + + +# Parts of this code come from here: +# https://github.com/benjiebob/StanfordExtra/blob/master/demo.ipynb +def visualize_keypoints(images, keypoints): + fig, axes = plt.subplots(nrows=len(images), ncols=2, figsize=(16, 12)) + [ax.axis("off") for ax in np.ravel(axes)] + + for (ax_orig, ax_all), image, current_keypoint in zip(axes, images, keypoints): + ax_orig.imshow(image) + ax_all.imshow(image) + + # If the keypoints were formed by `imgaug` then the coordinates need + # to be iterated differently. + if isinstance(current_keypoint, KeypointsOnImage): + for idx, kp in enumerate(current_keypoint.keypoints): + ax_all.scatter( + [kp.x], [kp.y], c=colours[idx], marker="x", s=50, linewidths=5 + ) + else: + current_keypoint = np.array(current_keypoint) + # Since the last entry is the visibility flag, we discard it. + current_keypoint = current_keypoint[:, :2] + for idx, (x, y) in enumerate(current_keypoint): + ax_all.scatter([x], [y], c=colours[idx], marker="x", s=50, linewidths=5) + + plt.tight_layout(pad=2.0) + plt.show() + + +# Select four samples randomly for visualization. +samples = list(json_dict.keys()) +num_samples = 4 +selected_samples = np.random.choice(samples, num_samples, replace=False) + +images, keypoints = [], [] + +for sample in selected_samples: + data = get_dog(sample) + image = data["img_data"] + keypoint = data["joints"] + + images.append(image) + keypoints.append(keypoint) + +visualize_keypoints(images, keypoints) + +""" +The plots show that we have images of non-uniform sizes, which is expected in most +real-world scenarios. However, if we resize these images to have a uniform shape (for +instance (224 x 224)) their ground-truth annotations will also be affected. The same +applies if we apply any geometric transformation (horizontal flip, for e.g.) to an image. +Fortunately, `imgaug` provides utilities that can handle this issue. +In the next section, we will write a data generator inheriting the +[`keras.utils.Sequence`](https://keras.io/api/utils/python_utils/#sequence-class) class +that applies data augmentation on batches of data using `imgaug`. 
+""" + +""" +## Prepare data generator +""" + + +class KeyPointsDataset(keras.utils.Sequence): + def __init__(self, image_keys, aug, batch_size=BATCH_SIZE, train=True): + self.image_keys = image_keys + self.aug = aug + self.batch_size = batch_size + self.train = train + self.on_epoch_end() + + def __len__(self): + return len(self.image_keys) // self.batch_size + + def on_epoch_end(self): + self.indexes = np.arange(len(self.image_keys)) + if self.train: + np.random.shuffle(self.indexes) + + def __getitem__(self, index): + indexes = self.indexes[index * self.batch_size: (index + 1) * self.batch_size] + image_keys_temp = [self.image_keys[k] for k in indexes] + (images, keypoints) = self.__data_generation(image_keys_temp) + + return (images, keypoints) + + def __data_generation(self, image_keys_temp): + batch_images = np.empty((self.batch_size, IMG_SIZE, IMG_SIZE, 3), dtype="int") + batch_keypoints = np.empty( + (self.batch_size, 1, 1, NUM_KEYPOINTS), dtype="float32" + ) + + for i, key in enumerate(image_keys_temp): + data = get_dog(key) + current_keypoint = np.array(data["joints"])[:, :2] + kps = [] + + # To apply our data augmentation pipeline, we first need to + # form Keypoint objects with the original coordinates. + for j in range(0, len(current_keypoint)): + kps.append(Keypoint(x=current_keypoint[j][0], y=current_keypoint[j][1])) + + # We then project the original image and its keypoint coordinates. + current_image = data["img_data"] + kps_obj = KeypointsOnImage(kps, shape=current_image.shape) + + # Apply the augmentation pipeline. + (new_image, new_kps_obj) = self.aug(image=current_image, keypoints=kps_obj) + batch_images[i,] = new_image + + # Parse the coordinates from the new keypoint object. + kp_temp = [] + for keypoint in new_kps_obj: + kp_temp.append(np.nan_to_num(keypoint.x)) + kp_temp.append(np.nan_to_num(keypoint.y)) + + # More on why this reshaping later. + batch_keypoints[i,] = np.array(kp_temp).reshape(1, 1, 24 * 2) + + # Scale the coordinates to [0, 1] range. + batch_keypoints = batch_keypoints / IMG_SIZE + + return (batch_images, batch_keypoints) + + +""" +To know more about how to operate with keypoints in `imgaug` check out +[this document](https://imgaug.readthedocs.io/en/latest/source/examples_keypoints.html). +""" + +""" +## Define augmentation transforms +""" + +train_aug = iaa.Sequential( + [ + iaa.Resize(IMG_SIZE, interpolation="linear"), + iaa.Fliplr(0.3), + # `Sometimes()` applies a function randomly to the inputs with + # a given probability (0.3, in this case). 
+ iaa.Sometimes(0.3, iaa.Affine(rotate=10, scale=(0.5, 0.7))), + ] +) + +test_aug = iaa.Sequential([iaa.Resize(IMG_SIZE, interpolation="linear")]) + +""" +## Create training and validation splits +""" + +np.random.shuffle(samples) + +train_keys, validation_keys = ( + samples[int(len(samples) * 0.15):], + samples[: int(len(samples) * 0.15)] + ) + +""" +## Data generator investigation +""" + +train_dataset = KeyPointsDataset(train_keys, train_aug) +print('-------------------------------------------------------------------TTTTTTTTTTTTTTTTType') +#print(train_dataset.shape) +print(type(train_dataset)) +if args.static==1: + train_dataset=(np.array(train_dataset[0][:4096]),[np.array(train_dataset[1][:4096]), np.array(train_dataset[2][:4096]), np.array(train_dataset[3][:4096])]) +validation_dataset = KeyPointsDataset(validation_keys, test_aug, train=False) + +print(f"Total batches in training set: {len(train_dataset)}") +print(f"Total batches in validation set: {len(validation_dataset)}") + +sample_images, sample_keypoints = next(iter(train_dataset)) +assert sample_keypoints.max() == 1.0 +assert sample_keypoints.min() == 0.0 + +sample_keypoints = sample_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE +visualize_keypoints(sample_images[:4], sample_keypoints) + +""" +## Model building + +The [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/) (on which +the StanfordExtra dataset is based) was built using the [ImageNet-1k dataset](http://image-net.org/). +So, it is likely that the models pretrained on the ImageNet-1k dataset would be useful +for this task. We will use a MobileNetV2 pre-trained on this dataset as a backbone to +extract meaningful features from the images and then pass those to a custom regression +head for predicting coordinates. +""" + +import tensorflow.compat.v2 as tf + +from keras import backend +from keras.applications import imagenet_utils +from keras.engine import training +from keras.layers import VersionAwareLayers +from keras.utils import data_utils +from keras.utils import layer_utils +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util.tf_export import keras_export + +BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/' + 'keras-applications/mobilenet_v2/') +layers = None + + +@keras_export('keras.applications.mobilenet_v2.MobileNetV2', + 'keras.applications.MobileNetV2') +def MobileNetV2(input_shape=None, + alpha=1.0, + include_top=True, + weights='imagenet', + input_tensor=None, + pooling=None, + classes=1000, + classifier_activation='softmax', + cache_dir=None, + **kwargs): + """Instantiates the MobileNetV2 architecture. + + MobileNetV2 is very similar to the original MobileNet, + except that it uses inverted residual blocks with + bottlenecking features. It has a drastically lower + parameter count than the original MobileNet. + MobileNets support any input size greater + than 32 x 32, with larger image sizes + offering better performance. + + Reference: + - [MobileNetV2: Inverted Residuals and Linear Bottlenecks]( + https://arxiv.org/abs/1801.04381) (CVPR 2018) + + This function returns a Keras image classification model, + optionally loaded with weights pre-trained on ImageNet. + + For image classification use cases, see + [this page for detailed examples]( + https://keras.io/api/applications/#usage-examples-for-image-classification-models). + + For transfer learning use cases, make sure to read the + [guide to transfer learning & fine-tuning]( + https://keras.io/guides/transfer_learning/). 
+ + Note: each Keras Application expects a specific kind of input preprocessing. + For MobileNetV2, call `tf.keras.applications.mobilenet_v2.preprocess_input` + on your inputs before passing them to the model. + `mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1. + + Args: + input_shape: Optional shape tuple, to be specified if you would + like to use a model with an input image resolution that is not + (224, 224, 3). + It should have exactly 3 inputs channels (224, 224, 3). + You can also omit this option if you would like + to infer input_shape from an input_tensor. + If you choose to include both input_tensor and input_shape then + input_shape will be used if they match, if the shapes + do not match then we will throw an error. + E.g. `(160, 160, 3)` would be one valid value. + alpha: Float between 0 and 1. controls the width of the network. + This is known as the width multiplier in the MobileNetV2 paper, + but the name is kept for consistency with `applications.MobileNetV1` + model in Keras. + - If `alpha` < 1.0, proportionally decreases the number + of filters in each layer. + - If `alpha` > 1.0, proportionally increases the number + of filters in each layer. + - If `alpha` = 1.0, default number of filters from the paper + are used at each layer. + include_top: Boolean, whether to include the fully-connected + layer at the top of the network. Defaults to `True`. + weights: String, one of `None` (random initialization), + 'imagenet' (pre-training on ImageNet), + or the path to the weights file to be loaded. + input_tensor: Optional Keras tensor (i.e. output of + `layers.Input()`) + to use as image input for the model. + pooling: String, optional pooling mode for feature extraction + when `include_top` is `False`. + - `None` means that the output of the model + will be the 4D tensor output of the + last convolutional block. + - `avg` means that global average pooling + will be applied to the output of the + last convolutional block, and thus + the output of the model will be a + 2D tensor. + - `max` means that global max pooling will + be applied. + classes: Integer, optional number of classes to classify images + into, only to be specified if `include_top` is True, and + if no `weights` argument is specified. + classifier_activation: A `str` or callable. The activation function to use + on the "top" layer. Ignored unless `include_top=True`. Set + `classifier_activation=None` to return the logits of the "top" layer. + When loading pretrained weights, `classifier_activation` can only + be `None` or `"softmax"`. + **kwargs: For backwards compatibility only. + + Returns: + A `keras.Model` instance. + """ + global layers + if 'layers' in kwargs: + layers = kwargs.pop('layers') + else: + layers = VersionAwareLayers() + if kwargs: + raise ValueError('Unknown argument(s): %s' % (kwargs,)) + if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)): + raise ValueError('The `weights` argument should be either ' + '`None` (random initialization), `imagenet` ' + '(pre-training on ImageNet), ' + 'or the path to the weights file to be loaded.') + + if weights == 'imagenet' and include_top and classes != 1000: + raise ValueError('If using `weights` as `"imagenet"` with `include_top` ' + 'as true, `classes` should be 1000') + + # Determine proper input shape and default size. 
+ # If both input_shape and input_tensor are used, they should match + if input_shape is not None and input_tensor is not None: + try: + is_input_t_tensor = backend.is_keras_tensor(input_tensor) + except ValueError: + try: + is_input_t_tensor = backend.is_keras_tensor( + layer_utils.get_source_inputs(input_tensor)) + except ValueError: + raise ValueError('input_tensor: ', input_tensor, + 'is not type input_tensor') + if is_input_t_tensor: + if backend.image_data_format() == 'channels_first': + if backend.int_shape(input_tensor)[1] != input_shape[1]: + raise ValueError('input_shape: ', input_shape, 'and input_tensor: ', + input_tensor, + 'do not meet the same shape requirements') + else: + if backend.int_shape(input_tensor)[2] != input_shape[1]: + raise ValueError('input_shape: ', input_shape, 'and input_tensor: ', + input_tensor, + 'do not meet the same shape requirements') + else: + raise ValueError('input_tensor specified: ', input_tensor, + 'is not a keras tensor') + + # If input_shape is None, infer shape from input_tensor + if input_shape is None and input_tensor is not None: + + try: + backend.is_keras_tensor(input_tensor) + except ValueError: + raise ValueError('input_tensor: ', input_tensor, 'is type: ', + type(input_tensor), 'which is not a valid type') + + if input_shape is None and not backend.is_keras_tensor(input_tensor): + default_size = 224 + elif input_shape is None and backend.is_keras_tensor(input_tensor): + if backend.image_data_format() == 'channels_first': + rows = backend.int_shape(input_tensor)[2] + cols = backend.int_shape(input_tensor)[3] + else: + rows = backend.int_shape(input_tensor)[1] + cols = backend.int_shape(input_tensor)[2] + + if rows == cols and rows in [96, 128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + + # If input_shape is None and no input_tensor + elif input_shape is None: + default_size = 224 + + # If input_shape is not None, assume default size + else: + if backend.image_data_format() == 'channels_first': + rows = input_shape[1] + cols = input_shape[2] + else: + rows = input_shape[0] + cols = input_shape[1] + + if rows == cols and rows in [96, 128, 160, 192, 224]: + default_size = rows + else: + default_size = 224 + + input_shape = imagenet_utils.obtain_input_shape( + input_shape, + default_size=default_size, + min_size=32, + data_format=backend.image_data_format(), + require_flatten=include_top, + weights=weights) + + if backend.image_data_format() == 'channels_last': + row_axis, col_axis = (0, 1) + else: + row_axis, col_axis = (1, 2) + rows = input_shape[row_axis] + cols = input_shape[col_axis] + + if weights == 'imagenet': + if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]: + raise ValueError('If imagenet weights are being loaded, ' + 'alpha can be one of `0.35`, `0.50`, `0.75`, ' + '`1.0`, `1.3` or `1.4` only.') + + if rows != cols or rows not in [96, 128, 160, 192, 224]: + rows = 224 + logging.warning('`input_shape` is undefined or non-square, ' + 'or `rows` is not in [96, 128, 160, 192, 224].' 
+ ' Weights for input shape (224, 224) will be' + ' loaded as the default.') + + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + else: + if not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + + first_block_filters = _make_divisible(32 * alpha, 8) + x = layers.Conv2D( + first_block_filters, + kernel_size=3, + strides=(2, 2), + padding='same', + use_bias=False, + name='Conv1')(img_input) + x = layers.BatchNormalization( + axis=channel_axis, epsilon=1e-3, momentum=0.999, name='bn_Conv1')( + x) + x = layers.ReLU(6., name='Conv1_relu')(x) + + x = _inverted_res_block( + x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0) + + x = _inverted_res_block( + x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1) + x = _inverted_res_block( + x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2) + + x = _inverted_res_block( + x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3) + x = _inverted_res_block( + x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4) + x = _inverted_res_block( + x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5) + + x = _inverted_res_block( + x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6) + x = _inverted_res_block( + x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7) + x = _inverted_res_block( + x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8) + x = _inverted_res_block( + x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9) + + x = _inverted_res_block( + x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10) + x = _inverted_res_block( + x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11) + x = _inverted_res_block( + x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12) + + x = _inverted_res_block( + x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13) + x = _inverted_res_block( + x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14) + x = _inverted_res_block( + x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15) + + x = _inverted_res_block( + x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16) + + # no alpha applied to last conv as stated in the paper: + # if the width multiplier is greater than 1 we + # increase the number of output channels + if alpha > 1.0: + last_block_filters = _make_divisible(1280 * alpha, 8) + else: + last_block_filters = 1280 + + x = layers.Conv2D( + last_block_filters, kernel_size=1, use_bias=False, name='Conv_1')( + x) + x = layers.BatchNormalization( + axis=channel_axis, epsilon=1e-3, momentum=0.999, name='Conv_1_bn')( + x) + x = layers.ReLU(6., name='out_relu')(x) + + if include_top: + x = layers.GlobalAveragePooling2D()(x) + imagenet_utils.validate_activation(classifier_activation, weights) + x = layers.Dense(classes, activation=classifier_activation, + name='predictions')(x) + + else: + if pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + + # Ensure that the model takes into account + # any potential predecessors of `input_tensor`. + if input_tensor is not None: + inputs = layer_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + + # Create model. + model = training.Model(inputs, x, name='mobilenetv2_%0.2f_%s' % (alpha, rows)) + + # Load weights. 
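+  # Note: `cache_dir` is this sample's addition to the stock Keras
+  # implementation. It lets `data_utils.get_file` resolve the .h5 weight files
+  # from a local, pre-downloaded cache (here: `--data_path`) instead of
+  # fetching them from BASE_WEIGHT_PATH at run time.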
+ if weights == 'imagenet': + if include_top: + model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + + str(float(alpha)) + '_' + str(rows) + '.h5') + weight_path = BASE_WEIGHT_PATH + model_name + weights_path = data_utils.get_file( + model_name, weight_path, cache_subdir='models', cache_dir=cache_dir) + else: + model_name = ('mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + + str(float(alpha)) + '_' + str(rows) + '_no_top' + '.h5') + weight_path = BASE_WEIGHT_PATH + model_name + weights_path = data_utils.get_file( + model_name, weight_path, cache_subdir='models', cache_dir=cache_dir) + model.load_weights(weights_path) + elif weights is not None: + model.load_weights(weights) + + return model + + +def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id): + """Inverted ResNet block.""" + channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1 + + in_channels = backend.int_shape(inputs)[channel_axis] + pointwise_conv_filters = int(filters * alpha) + pointwise_filters = _make_divisible(pointwise_conv_filters, 8) + x = inputs + prefix = 'block_{}_'.format(block_id) + + if block_id: + # Expand + x = layers.Conv2D( + expansion * in_channels, + kernel_size=1, + padding='same', + use_bias=False, + activation=None, + name=prefix + 'expand')( + x) + x = layers.BatchNormalization( + axis=channel_axis, + epsilon=1e-3, + momentum=0.999, + name=prefix + 'expand_BN')( + x) + x = layers.ReLU(6., name=prefix + 'expand_relu')(x) + else: + prefix = 'expanded_conv_' + + # Depthwise + if stride == 2: + x = layers.ZeroPadding2D( + padding=imagenet_utils.correct_pad(x, 3), + name=prefix + 'pad')(x) + x = layers.DepthwiseConv2D( + kernel_size=3, + strides=stride, + activation=None, + use_bias=False, + padding='same' if stride == 1 else 'valid', + name=prefix + 'depthwise')( + x) + x = layers.BatchNormalization( + axis=channel_axis, + epsilon=1e-3, + momentum=0.999, + name=prefix + 'depthwise_BN')( + x) + + x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x) + + # Project + x = layers.Conv2D( + pointwise_filters, + kernel_size=1, + padding='same', + use_bias=False, + activation=None, + name=prefix + 'project')( + x) + x = layers.BatchNormalization( + axis=channel_axis, + epsilon=1e-3, + momentum=0.999, + name=prefix + 'project_BN')( + x) + + if in_channels == pointwise_filters and stride == 1: + return layers.Add(name=prefix + 'add')([inputs, x]) + return x + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
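+  # Worked example: _make_divisible(32 * 0.35, 8) -> int(11.2 + 4) // 8 * 8 = 8,
+  # but 8 < 0.9 * 11.2, so the result is bumped to 16 by the check below.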
+  if new_v < 0.9 * v:
+    new_v += divisor
+  return new_v
+
+
+@keras_export('keras.applications.mobilenet_v2.preprocess_input')
+def preprocess_input(x, data_format=None):
+  return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
+
+
+@keras_export('keras.applications.mobilenet_v2.decode_predictions')
+def decode_predictions(preds, top=5):
+  return imagenet_utils.decode_predictions(preds, top=top)
+
+
+preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
+    mode='',
+    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
+    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
+decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
+
+
+def get_model():
+    # Load the pre-trained MobileNetV2 weights and freeze the backbone
+    backbone = MobileNetV2(
+        weights="imagenet",
+        include_top=False,
+        input_shape=(IMG_SIZE, IMG_SIZE, 3),
+        cache_dir=args.data_path,
+    )
+    backbone.trainable = False
+
+    inputs = layers.Input((IMG_SIZE, IMG_SIZE, 3))
+    x = keras.applications.mobilenet_v2.preprocess_input(inputs)
+    x = backbone(x)
+    x = layers.Dropout(0.3)(x)
+    x = layers.SeparableConv2D(
+        NUM_KEYPOINTS, kernel_size=5, strides=1, activation="relu"
+    )(x)
+    outputs = layers.SeparableConv2D(
+        NUM_KEYPOINTS, kernel_size=3, strides=1, activation="sigmoid"
+    )(x)
+
+    return keras.Model(inputs, outputs, name="keypoint_detector")
+
+
+"""
+Our custom network is fully convolutional, which keeps its parameter count well below
+that of an equivalent network ending in fully-connected dense layers.
+"""
+
+# get_model().summary()
+
+"""
+Notice the output shape of the network: `(None, 1, 1, 48)`. This is why we have reshaped
+the coordinates as: `batch_keypoints[i, :] = np.array(kp_temp).reshape(1, 1, 24 * 2)`.
+"""
+
+"""
+## Model compilation and training
+
+For this example, we will train the network only for five epochs.
+"""
+
+model = get_model()
+model.compile(loss="mse", optimizer=keras.optimizers.Adam(1e-4))
+model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS, verbose=2)
+model.save_weights(filepath="checkpoint/tf_model", save_format="tf")
+
+"""
+## Make predictions and visualize them
+"""
+
+# sample_val_images, sample_val_keypoints = next(iter(validation_dataset))
+# sample_val_images = sample_val_images[:4]
+# sample_val_keypoints = sample_val_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE
+# predictions = model.predict(sample_val_images).reshape(-1, 24, 2) * IMG_SIZE
+
+# # Ground-truth
+# visualize_keypoints(sample_val_images, sample_val_keypoints)
+
+# # Predictions
+# visualize_keypoints(sample_val_images, predictions)
+
+"""
+Predictions will likely improve with more training.
+"""
+
+"""
+## Going further
+
+* Try using other augmentation transforms from `imgaug` to investigate how that changes
+the results.
+* Here, we transferred the features from the pre-trained network linearly, that is, we
+did not [fine-tune](https://keras.io/guides/transfer_learning/) it. You are encouraged
+to fine-tune it on this task (see the sketch below) and check whether that improves the
+performance. You can also try different architectures and see how they affect the final
+performance.
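+
+As a rough sketch of that fine-tuning idea, one could unfreeze the backbone and keep
+training with a much lower learning rate. (The layer name below is an assumption that
+holds for the default `alpha=1.0` and 224x224 inputs used in this example; check
+`model.summary()` if in doubt.)
+
+```python
+backbone = model.get_layer("mobilenetv2_1.00_224")
+backbone.trainable = True  # unfreeze the MobileNetV2 feature extractor
+
+# Recompile so the trainability change takes effect, and use a smaller
+# learning rate so the pretrained features are not destroyed.
+model.compile(loss="mse", optimizer=keras.optimizers.Adam(1e-5))
+model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS, verbose=2)
+```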
+""" diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/run_1p.sh new file mode 100644 index 000000000..29003bf08 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/run_1p.sh @@ -0,0 +1,2 @@ +cur_path='pwd' +python3 ${cur_path}/keypoint_detection.py > loss+perf_gpu.txt 2>&1 diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..f329db972 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,166 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#设置默认日志级别,不需要修改 +# export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="keypoint_detection_ID2516_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=5 +#训练step +#train_steps=50000 +#学习率 +# learning_rate=0.001 +# weight_decay=0.0001 +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/output/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_full_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == 
--fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 keypoint_detection.py \
+    --data_path=$data_path \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path}>$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Performance (FPS); review and adjust for your model
+TrainingTime=`grep 333/333 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"`
+FPS=`awk 'BEGIN{printf "%.2f\n",'333'*'${batch_size}'/'${TrainingTime}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy; review and adjust for your model
+train_accuracy=`grep '333/333' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $6}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=${TrainingTime}
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep 333/333 | awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git 
a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..ca8a3aa55
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,166 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no modification needed
+# export ASCEND_GLOBAL_LOG_LEVEL_ETP=3
+
+# Base parameters; review and adjust for your model
+# Batch size
+batch_size=64
+# Network name, identical to the directory name
+Network="keypoint_detection_ID2516_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=2
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+############ Debug/maintenance parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/output/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Debug/maintenance parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/test/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/test/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/test/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 keypoint_detection.py \
+    --data_path=$data_path \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    
--profiling_dump_path=${profiling_dump_path}>$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Performance (FPS); review and adjust for your model
+TrainingTime=`grep 166/166 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk '{print $3}'|tr -d 's'|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy; review and adjust for your model
+# train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$9}'`
+# Print; no modification needed
+echo "Final Train Accuracy : Loss"
+
+
+# Accuracy monitoring summary
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=${TrainingTime}
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep 166/166 |awk '{print $6}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainAccuracy = Loss" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p_static.sh
new file mode 100644
index 000000000..ea35d321f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/test/train_performance_1p_static.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Default log level; no modification needed
+# export ASCEND_GLOBAL_LOG_LEVEL=3
+
+# Base parameters; review and adjust for your model
+# Batch size
+batch_size=32
+# Network name, identical to the directory name
+Network="keypoint_detection_ID2516_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=2
+# Training steps
+#train_steps=50000
+# Learning rate
+# learning_rate=0.001
+# weight_decay=0.0001
+# Parameter configuration
+data_path=""
+
+############ Debug/maintenance parameters ##############
+precision_mode="allow_mix_precision"
+# Fixed parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/output/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Debug/maintenance parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p_static.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/test/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/test/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/test/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 keypoint_detection.py \
+    --data_path=$data_path \
+    --epochs=${train_epochs} \
+    --batch_size=${batch_size} \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --static=1 >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Performance (FPS); review and adjust for your model
+TrainingTime=`grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"`
+FPS=`awk 'BEGIN{printf "%.2f\n",'333'*'${batch_size}'/'${TrainingTime}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Training accuracy; review and adjust for your model
+train_accuracy=`grep 'loss:' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $6}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=${TrainingTime}
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep loss: | 
awk '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
-- 
Gitee

From 8ff70fe60fe5f23eac6f4c254deb9ae4803aba00 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:18:52 +0000
Subject: [PATCH 41/54] =?UTF-8?q?knowledge=5Fdisti=5FID2517=5Ffor=5FTensor?=
 =?UTF-8?q?Flow2.X=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 .../LICENSE                                   | 284 +++++++++++++++
 .../ReadMe.md                                 |  26 ++
 .../knowledge_distillation.py                 | 332 ++++++++++++++++++
 .../requirements.txt                          |   2 +
 .../test/train_full_1p.sh                     | 103 ++++++
 .../test/train_performance_1p_dynamic_eval.sh | 114 ++++++
 .../test/train_performance_1p_static_eval.sh  | 103 ++++++
 7 files changed, 964 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/LICENSE
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/ReadMe.md
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/knowledge_distillation.py
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/requirements.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_full_1p.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh

diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..d87a402b5
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,284 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/ReadMe.md b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/ReadMe.md
new file mode 100644
index 000000000..6bda4da8b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/ReadMe.md
@@ -0,0 +1,26 @@
+## 1. Basic information
+
+### Network name
+
+```shell
+knowledge_distillation_ID2517_for_TensorFlow2.X
+```
+
+### GitHub URL
+
+```shell
+https://github.com/keras-team/keras-io/tree/master/examples/vision
+```
+
+### Dataset
+
+```
+MNIST
+```
+
+### Running the program
+
+```
+train_performance_1p_static_eval.sh --data_path=/MNIST/mnist.npz
+```
+
diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/knowledge_distillation.py b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/knowledge_distillation.py
new file mode 100644
index 000000000..71938b428
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/knowledge_distillation.py
@@ -0,0 +1,332 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Title: Knowledge Distillation
+Author: [Kenneth Borup](https://twitter.com/Kennethborup)
+Date created: 2020/09/01
+Last modified: 2020/09/01
+Description: Implementation of classical Knowledge Distillation.
+""" + +""" +## Introduction to Knowledge Distillation + +Knowledge Distillation is a procedure for model +compression, in which a small (student) model is trained to match a large pre-trained +(teacher) model. Knowledge is transferred from the teacher model to the student +by minimizing a loss function, aimed at matching softened teacher logits as well as +ground-truth labels. + +The logits are softened by applying a "temperature" scaling function in the softmax, +effectively smoothing out the probability distribution and revealing +inter-class relationships learned by the teacher. + +**Reference:** + +- [Hinton et al. (2015)](https://arxiv.org/abs/1503.02531) +""" + +""" +## Setup +""" + +import npu_device +npu_device.open().as_default() + +import ast +import argparse + +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers +import numpy as np + + +""" +## Construct `Distiller()` class + +The custom `Distiller()` class, overrides the `Model` methods `train_step`, `test_step`, +and `compile()`. In order to use the distiller, we need: + +- A trained teacher model +- A student model to train +- A student loss function on the difference between student predictions and ground-truth +- A distillation loss function, along with a `temperature`, on the difference between the +soft student predictions and the soft teacher labels +- An `alpha` factor to weight the student and distillation loss +- An optimizer for the student and (optional) metrics to evaluate performance + +In the `train_step` method, we perform a forward pass of both the teacher and student, +calculate the loss with weighting of the `student_loss` and `distillation_loss` by `alpha` and +`1 - alpha`, respectively, and perform the backward pass. Note: only the student weights are updated, +and therefore we only calculate the gradients for the student weights. + +In the `test_step` method, we evaluate the student model on the provided dataset. +""" + +parser = argparse.ArgumentParser() +parser.add_argument('--data_path', default="/home/data", type=str, + help='the path to train data') +parser.add_argument('--epoch', default=2, type=int, + help='the path to train data') +parser.add_argument('--eval_static', dest="eval_static", type=ast.literal_eval, + help='the path to train data') +args = parser.parse_args() + +class Distiller(keras.Model): + def __init__(self, student, teacher): + super(Distiller, self).__init__() + self.teacher = teacher + self.student = student + + def compile( + self, + optimizer, + metrics, + student_loss_fn, + distillation_loss_fn, + alpha=0.1, + temperature=3, + ): + """ Configure the distiller. + + Args: + optimizer: Keras optimizer for the student weights + metrics: Keras metrics for evaluation + student_loss_fn: Loss function of difference between student + predictions and ground-truth + distillation_loss_fn: Loss function of difference between soft + student predictions and soft teacher predictions + alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn + temperature: Temperature for softening probability distributions. + Larger temperature gives softer distributions. 
+        """
+        super(Distiller, self).compile(optimizer=optimizer, metrics=metrics)
+        self.student_loss_fn = student_loss_fn
+        self.distillation_loss_fn = distillation_loss_fn
+        self.alpha = alpha
+        self.temperature = temperature
+
+    def train_step(self, data):
+        # Unpack data
+        x, y = data
+
+        # Forward pass of teacher
+        teacher_predictions = self.teacher(x, training=False)
+
+        with tf.GradientTape() as tape:
+            # Forward pass of student
+            student_predictions = self.student(x, training=True)
+
+            # Compute losses
+            student_loss = self.student_loss_fn(y, student_predictions)
+            distillation_loss = self.distillation_loss_fn(
+                tf.nn.softmax(teacher_predictions / self.temperature, axis=1),
+                tf.nn.softmax(student_predictions / self.temperature, axis=1),
+            )
+            loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
+
+        # Compute gradients
+        trainable_vars = self.student.trainable_variables
+        gradients = tape.gradient(loss, trainable_vars)
+
+        # Update weights
+        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
+
+        # Update the metrics configured in `compile()`.
+        self.compiled_metrics.update_state(y, student_predictions)
+
+        # Return a dict of performance
+        results = {m.name: m.result() for m in self.metrics}
+        results.update(
+            {"student_loss": student_loss, "distillation_loss": distillation_loss}
+        )
+        return results
+
+    def test_step(self, data):
+        # Unpack the data
+        x, y = data
+
+        # Compute predictions
+        y_prediction = self.student(x, training=False)
+
+        # Calculate the loss
+        student_loss = self.student_loss_fn(y, y_prediction)
+
+        # Update the metrics.
+        self.compiled_metrics.update_state(y, y_prediction)
+
+        # Return a dict of performance
+        results = {m.name: m.result() for m in self.metrics}
+        results.update({"student_loss": student_loss})
+        return results
+
+
+"""
+## Create student and teacher models
+
+Initially, we create a teacher model and a smaller student model. Both models are
+convolutional neural networks created using `Sequential()`,
+but they could be any Keras model.
+"""
+
+# Create the teacher
+teacher = keras.Sequential(
+    [
+        keras.Input(shape=(28, 28, 1)),
+        layers.Conv2D(256, (3, 3), strides=(2, 2), padding="same"),
+        layers.LeakyReLU(alpha=0.2),
+        layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
+        layers.Conv2D(512, (3, 3), strides=(2, 2), padding="same"),
+        layers.Flatten(),
+        layers.Dense(10),
+    ],
+    name="teacher",
+)
+
+# Create the student
+student = keras.Sequential(
+    [
+        keras.Input(shape=(28, 28, 1)),
+        layers.Conv2D(16, (3, 3), strides=(2, 2), padding="same"),
+        layers.LeakyReLU(alpha=0.2),
+        layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
+        layers.Conv2D(32, (3, 3), strides=(2, 2), padding="same"),
+        layers.Flatten(),
+        layers.Dense(10),
+    ],
+    name="student",
+)
+
+# Clone student for later comparison
+student_scratch = keras.models.clone_model(student)
+
+"""
+## Prepare the dataset
+
+The dataset used for training the teacher and distilling the teacher is
+[MNIST](https://keras.io/api/datasets/mnist/), and the procedure would be equivalent for any other
+dataset, e.g. [CIFAR-10](https://keras.io/api/datasets/cifar10/), with a suitable choice
+of models. Both the student and teacher are trained on the training set and evaluated on
+the test set.
+"""
+
+# Prepare the train and test dataset.
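+# `--data_path` should contain `mnist.npz`, the same archive that
+# `keras.datasets.mnist.load_data()` would download (arrays: x_train, y_train,
+# x_test, y_test); loading it locally avoids network access on the NPU host.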
+batch_size = 32
+# (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
+
+path = args.data_path + '/mnist.npz'
+f = np.load(path)
+x_train, y_train = f['x_train'], f['y_train']
+x_test, y_test = f['x_test'], f['y_test']
+f.close()
+
+if args.eval_static:
+    # 9984 = 312 * 32, so every evaluation batch is full (static shape).
+    x_test = x_test[:9984]
+    y_test = y_test[:9984]
+
+# Normalize data
+x_train = x_train.astype("float32") / 255.0
+x_train = np.reshape(x_train, (-1, 28, 28, 1))
+
+x_test = x_test.astype("float32") / 255.0
+x_test = np.reshape(x_test, (-1, 28, 28, 1))
+
+
+"""
+## Train the teacher
+
+In knowledge distillation we assume that the teacher is trained and fixed. Thus, we start
+by training the teacher model on the training set in the usual way.
+"""
+
+# Train teacher as usual
+teacher.compile(
+    optimizer=keras.optimizers.Adam(),
+    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+    metrics=[keras.metrics.SparseCategoricalAccuracy()],
+)
+
+# Train and evaluate teacher on data.
+teacher.fit(x_train, y_train, epochs=args.epoch, verbose=2)
+teacher.evaluate(x_test, y_test, verbose=2)
+
+"""
+## Distill teacher to student
+
+We have already trained the teacher model, and we only need to initialize a
+`Distiller(student, teacher)` instance, `compile()` it with the desired losses,
+hyperparameters and optimizer, and distill the teacher to the student.
+"""
+
+# Initialize and compile distiller
+distiller = Distiller(student=student, teacher=teacher)
+distiller.compile(
+    optimizer=keras.optimizers.Adam(),
+    metrics=[keras.metrics.SparseCategoricalAccuracy()],
+    student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+    distillation_loss_fn=keras.losses.KLDivergence(),
+    alpha=0.1,
+    temperature=10,
+)
+
+# Distill teacher to student
+distiller.fit(x_train, y_train, epochs=args.epoch, verbose=2)
+
+# Evaluate student on test dataset
+distiller.evaluate(x_test, y_test, verbose=2)
+
+"""
+## Train student from scratch for comparison
+
+We can also train an equivalent student model from scratch without the teacher, in order
+to evaluate the performance gain obtained by knowledge distillation.
+"""
+
+# Train the student as usual
+student_scratch.compile(
+    optimizer=keras.optimizers.Adam(),
+    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+    metrics=[keras.metrics.SparseCategoricalAccuracy()],
+)
+
+# Train and evaluate the student trained from scratch.
+student_scratch.fit(x_train, y_train, epochs=args.epoch, verbose=2)
+student_scratch.evaluate(x_test, y_test, verbose=2)
+
+"""
+If the teacher is trained for 5 full epochs and the student is distilled on this teacher
+for 3 full epochs, you should in this example experience a performance boost compared to
+training the same student model from scratch, and even compared to the teacher itself.
+You should expect the teacher to have accuracy around 97.6%, the student trained from
+scratch should be around 97.6%, and the distilled student should be around 98.1%. Remove
+or try out different seeds to use different weight initializations.
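+
+For reference, the overall objective minimized in `train_step` above can be written
+as (with ground-truth labels `y`, student logits `z_s`, teacher logits `z_t`, and
+temperature `T`):
+
+```
+loss = alpha * CE(y, softmax(z_s))
+       + (1 - alpha) * KL(softmax(z_t / T) || softmax(z_s / T))
+```
+
+where `CE` is the sparse categorical cross-entropy against the ground-truth labels
+and `KL` is the Kullback-Leibler divergence used as the distillation loss.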
+""" diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..662fa34ad --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/requirements.txt @@ -0,0 +1,2 @@ +tensorflow>=2.4.0 +numpy \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..83871f464 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,103 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="knowledge_distillation_ID2517_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=5 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 knowledge_distillation.py --data_path=$data_path \ + --epoch=$train_epochs \ + --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep 1875/1875 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep sparse_categorical_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep student_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh new file mode 100644 index 000000000..11b158169 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh @@ -0,0 +1,114 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="knowledge_disti_ID2517_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=2 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 knowledge_distillation.py --data_path=$data_path \ + --epoch=$train_epochs \ + --eval_static=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep 1875/1875 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +#train_accuracy=`grep sparse_categorical_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'` +#打印,不需要修改 +#echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'dynamic'_'eval'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep student_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $9}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#ModelStatus="图执行FAIL" +#DTS_Number="DTS2022010611495" +# error_msg="E19999" 
+#error_msg="Output shape is still unknown after shape inference. shape = \[-1\]."
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+# error_msg="Graph engine process graph failed: E19999: Inner Error! Output shape is still unknown after shape inference. shape = [-1]."
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
new file mode 100644
index 000000000..43e81d466
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=32
+# Network name, same as the directory name
+Network="knowledge_disti_ID2517_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=2
+# Training steps
+train_steps=60000
+# Learning rate
+#learning_rate=1e-5
+
+# Parameter configuration
+data_path=""
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p_static_eval.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path
+
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 knowledge_distillation.py --data_path=$data_path \
+    --epoch=$train_epochs \
+    --eval_static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2e_time"
+
+# Result printing; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+TrainingTime=`grep 1875/1875 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'`
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+train_accuracy=`grep sparse_categorical_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy monitoring summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=${TrainingTime}
+
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep student $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
-- 
Gitee
From ac38490a78094281676e0ec7ed1c14a5fb51fc51 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:19:13 +0000
Subject: =?UTF-8?q?learnable=5Fresizer=5FID2518=5Ffor=5FTens?=
 =?UTF-8?q?orFlow2.X=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../LICENSE                                   | 284 ++++++++++++
 .../README.md                                 | 202 +++++++++
 .../learnable_resizer.py                      | 414 ++++++++++++++++++
 .../modelzoo_level.txt                        |   3 +
 .../requirements.txt                          |   2 +
 .../test/train_full_1p.sh                     | 158 +++++++
 .../test/train_performance_1p.sh              | 158 +++++++
 .../test/train_performance_1p_dynamic_eval.sh | 168 +++++++
 .../test/train_performance_1p_static_eval.sh  | 158 +++++++
 9 files changed, 1547 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/LICENSE
 create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/README.md
 create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/learnable_resizer.py
 create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/modelzoo_level.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/requirements.txt
 create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_full_1p.sh
 create mode 100644 
TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+Files: third_party/hexagon
+
+Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the
+disclaimer below) provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..f6f12e1ef
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/README.md
@@ -0,0 +1,202 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+

+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.4.8**
+
+**Size: 51KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code, based on the TensorFlow framework, for learning how best to resize a given image for a given image resolution and model**
+

+<h2 id="概述.md">Overview</h2>
+
+## Brief Description
+
+
+It is widely believed that if we constrain vision models to perceive things the way humans do, their performance can be improved. Humans mostly use shape descriptors to develop a common perception. But does this belief always hold, especially when it comes to improving the performance of vision models?
+It turns out that this may not always be the case. When training vision models, images are usually resized to a lower dimension ((224 x 224), (299 x 299), etc.) to allow mini-batch learning and to stay within compute limits. For this step we generally use image resizing methods such as bilinear interpolation, and the resized images do not lose much of their perceptual character to the human eye. In [Learning to Resize Images for Computer Vision Tasks](https://arxiv.org/abs/2103.09950v1), Talebi et al. show that if we optimize the perceptual quality of the images for the vision models rather than for the human eye, their performance can be improved further. They investigate the following question:
+**For a given image resolution and model, how can a given image best be resized?**
+As shown in the paper, this idea helps to consistently improve the performance of common vision models (pre-trained on ImageNet-1k) such as DenseNet-121, ResNet-50, MobileNetV2, and EfficientNets. In this example, we implement the learnable image resizing module as proposed in the paper, using the Cats and Dogs dataset and the DenseNet-121 architecture.
+
+- Reference paper:
+
+  https://arxiv.org/abs/2103.09950v1
+
+- Reference implementation:
+
+  https://github.com/keras-team/keras-io/blob/master/examples/vision/learnable_resizer.py
+
+- Implementation adapted to the Ascend AI processor:
+
+  skip
+
+- To fetch the code at the corresponding commit_id via Git:
+
+    git clone {repository_url}        # Clone the repository
+    cd {repository_name}              # Enter the model's code directory
+    git checkout {branch}             # Switch to the corresponding branch
+    git reset --hard {commit_id}      # Reset the code to the corresponding commit_id
+    cd {code_path}                    # Enter the model code path; skip this step if the repository contains only this model
+
+
+## Default Configuration
+
+- Network architecture:
+  - DenseNet-121
+
+- Training hyperparameters (single card):
+  - Batch size: 64
+  - Input size: (300, 300)
+  - Target size: (150, 150)
+  - Interpolation: "bilinear"
+  - Train epochs: 5
+
+
+## Supported Features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed Precision Training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers the precision of some float32 operators in the network to float16, improving system performance and reducing memory usage with very little loss of accuracy.
+
+## Enabling Mixed Precision
+
+In the launch script, pass --precision_mode='allow_mix_precision':
+
+```
+ ./train_performance_1p_16bs.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+```
+
+Related code example:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+                    help='allow_fp32_to_fp16/force_fp16/ '
+                         'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
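+For example, to launch a performance run with mixed precision explicitly enabled
+(assuming the dataset directory is /home, as in the Quick Start section below):
+
+```
+cd test
+bash train_performance_1p.sh --data_path=/home --precision_mode=allow_mix_precision
+```
+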

+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware environment and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+

+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset Preparation
+
+1. Prepare the dataset yourself. This network uses the Cats and Dogs dataset.
+
+Reference dataset directory layout:
+
+```
+├── cats_vs_dogs
+│   ├──4.0.0
+│   │   ├──cats_vs_dogs-train.tfrecord-00000-of-00008
+│   │   │   ......
+│   │   ├──cats_vs_dogs-train.tfrecord-00007-of-00008
+│   │   ├──dataset_info.json
+│   │   ├──features.json
+│   │   ├──label.labels.txt
+```
+
+
+
+## Model Training
+
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required to run the program.
+
+       For environment variable configuration, see:
+
+       [Environment variable setup for the Ascend 910 training platform](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-card training
+
+       2.1 Single-card training command (the script is located at learnable_resizer_ID2518_for_TensorFlow2.X/test/train_full_1p.sh). First use cd to enter the test directory, then launch training with the command below. Make sure to change "--data_path" in the example to your own data path; here the cats_vs_dogs folder is placed under the home directory.
+
+           bash train_full_1p.sh --data_path=/home
+
+
+

+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and Sample Code
+
+```
+|--LICENSE
+|--README.md                                   # documentation
+|--learnable_resizer.py                        # training code
+|--requirements.txt                            # required dependencies
+|--test                                        # training script directory
+|  |--train_full_1p.sh                         # full training script
+|  |--train_performance_1p_dynamic_eval.sh     # performance training script (dynamic shape)
+|  |--train_performance_1p_static_eval.sh      # performance training script (static shape)
+```
+
+## Script Parameters
+
+```
+--data_path                  # the path to train data
+--epoch                      # epochs of training
+--static                     # whether it is a static shape
+--log_steps                  # TimeHistory log step interval
+--precision_mode             # precision mode, default is allow_mix_precision
+--over_dump                  # if or not over detection, default is False
+--data_dump_flag             # data dump flag, default is False
+--data_dump_step             # data dump step, default is 10
+--profiling                  # if or not profiling for performance debug, default is False
+--profiling_dump_path        # the path to save profiling data
+--over_dump_path             # the path to save over dump data
+--data_dump_path             # the path to save dump data
+--use_mixlist                # use_mixlist flag, default is False
+--fusion_off_flag            # fusion_off flag, default is False
+--mixlist_file               # mixlist file name, default is ops_info.json
+--fusion_off_file            # fusion_off file name, default is fusion_switch.cfg
+```
+
+## Training Process
+
+Launch single-card or multi-card training with the commands described in "Model Training". Single-card and multi-card runs use different scripts; single-card and 8-card network training are supported. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training logs and checkpoint files. Taking 8-card training as an example, the loss information is in the file ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/learnable_resizer.py b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/learnable_resizer.py
new file mode 100644
index 000000000..c12f04cad
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/learnable_resizer.py
@@ -0,0 +1,414 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+"""
+Title: Learning to Resize in Computer Vision
+Author: [Sayak Paul](https://twitter.com/RisingSayak)
+Date created: 2021/04/30
+Last modified: 2021/05/13
+Description: How to optimally learn representations of images for a given resolution.
+"""
+"""
+It is a common belief that if we constrain vision models to perceive things as humans do,
+their performance can be improved.
+For example, in [this work](https://arxiv.org/abs/1811.12231),
+Geirhos et al. showed that the vision models pre-trained on the ImageNet-1k dataset are
+biased toward texture whereas human beings mostly use the shape descriptor to develop a
+common perception. But does this belief always apply, especially when it comes to improving
+the performance of vision models?
+
+It turns out it may not always be the case. When training vision models, it is common to
+resize images to a lower dimension ((224 x 224), (299 x 299), etc.) to allow mini-batch
+learning and also to keep within the compute limitations. We generally make use of image
+resizing methods like **bilinear interpolation** for this step and the resized images do
+not lose much of their perceptual character to the human eyes. In
+[Learning to Resize Images for Computer Vision Tasks](https://arxiv.org/abs/2103.09950v1), Talebi et al. show
+that if we try to optimize the perceptual quality of the images for the vision models
+rather than the human eyes, their performance can further be improved. They investigate
+the following question:
+
+**For a given image resolution and a model, how to best resize the given images?**
+
+As shown in the paper, this idea helps to consistently improve the performance of the
+common vision models (pre-trained on ImageNet-1k) like DenseNet-121, ResNet-50,
+MobileNetV2, and EfficientNets. In this example, we will implement the learnable image
+resizing module as proposed in the paper and demonstrate that on the
+[Cats and Dogs dataset](https://www.microsoft.com/en-us/download/details.aspx?id=54765)
+using the [DenseNet-121](https://arxiv.org/abs/1608.06993) architecture.
+
+This example requires TensorFlow 2.4 or higher.
+"""
+
+"""
+## Setup
+"""
+
+import npu_device
+import time
+import ast
+import argparse
+
+from tensorflow.keras import layers
+from tensorflow import keras
+import tensorflow as tf
+
+import tensorflow_datasets as tfds
+
+tfds.disable_progress_bar()
+
+import numpy as np
+#===============================NPU Migration=========================================
+parser = argparse.ArgumentParser()
+parser.add_argument('--data_path', default="/home/data", type=str,
+                    help='the path to train data')
+parser.add_argument('--epoch', default=2, type=int,
+                    help='number of epochs to train')
+parser.add_argument('--static', dest="static", type=ast.literal_eval,
+                    help='whether it is a static shape')
+parser.add_argument("--log_steps", default=145, type=int,
+                    help="TimeHistory log step interval.")
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='precision mode, e.g. allow_mix_precision')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                    help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                    help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+                    help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,
+                    help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str,
+                    help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str,
+                    help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str,
+                    help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                    help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+                    help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str,
+                    help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,
+                    help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,
+                    help='auto_tune flag, default is False')
+args = parser.parse_args()
+
+def npu_config():
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        npu_device.global_options().profiling_config.profiling_options = profiling_options
+    npu_device.global_options().precision_mode = args.precision_mode
+    if args.use_mixlist and args.precision_mode=='allow_mix_precision':
+        npu_device.global_options().modify_mixlist=args.mixlist_file
+    if args.fusion_off_flag:
+        npu_device.global_options().fusion_switch_file=args.fusion_off_file
+    if args.auto_tune:
+        npu_device.global_options().auto_tune_mode="RL,GA"
+    npu_device.open().as_default()
+#===============================NPU Migration=========================================
+
+npu_config()
+
+"""
+## Define hyperparameters
+"""
+
+"""
+In order to facilitate mini-batch learning, we need to have a fixed shape for the images
+inside a given batch. This is why an initial resizing is required. We first resize all
+the images to (300 x 300) shape and then learn their optimal representation for the
+(150 x 150) resolution.
+"""
+
+INP_SIZE = (300, 300)
+TARGET_SIZE = (150, 150)
+INTERPOLATION = "bilinear"
+
+AUTO = tf.data.AUTOTUNE
+BATCH_SIZE = 64
+EPOCHS = args.epoch
+
+"""
+In this example, we will use bilinear interpolation, but the learnable image resizer
+module is not dependent on any specific interpolation method. We can also use others,
+such as bicubic.
+"""
+
+"""
+## Load and prepare the dataset
+
+For this example, we will only use 40% of the total training dataset.
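+Concretely, the `tfds.load` call below uses `train[:40%]` of the original training
+split for training and the next 10% (`train[40%:50%]`) for validation. With
+`download=False`, the prepared `cats_vs_dogs` TFRecords are expected to already
+exist under `--data_path` (see the dataset layout in the README).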
+""" + +train_ds, validation_ds = tfds.load( + "cats_vs_dogs", + download=False, + data_dir=args.data_path, + split=["train[:40%]", "train[40%:50%]"], + as_supervised=True, +) + +def preprocess_dataset(image, label): + image = tf.image.resize(image, (INP_SIZE[0], INP_SIZE[1])) + label = tf.one_hot(label, depth=2) + return (image, label) + + +train_ds = ( + train_ds.shuffle(BATCH_SIZE * 100) + .map(preprocess_dataset, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=args.static) + .prefetch(AUTO) +) +validation_ds = ( + validation_ds.map(preprocess_dataset, num_parallel_calls=AUTO) + .batch(BATCH_SIZE, drop_remainder=args.static) + .prefetch(AUTO) +) + +""" +## Define the learnable resizer utilities + +The figure below (courtesy: [Learning to Resize Images for Computer Vision Tasks](https://arxiv.org/abs/2103.09950v1)) +presents the structure of the learnable resizing module: + +![](https://i.ibb.co/gJYtSs0/image.png) +""" + + +def conv_block(x, filters, kernel_size, strides, activation=layers.LeakyReLU(0.2)): + x = layers.Conv2D(filters, kernel_size, strides, padding="same", use_bias=False)(x) + x = layers.BatchNormalization()(x) + if activation: + x = activation(x) + return x + + +def res_block(x): + inputs = x + x = conv_block(x, 16, 3, 1) + x = conv_block(x, 16, 3, 1, activation=None) + return layers.Add()([inputs, x]) + + +def get_learnable_resizer(filters=16, num_res_blocks=1, interpolation=INTERPOLATION): + inputs = layers.Input(shape=[None, None, 3]) + + # First, perform naive resizing. + naive_resize = layers.Resizing(*TARGET_SIZE, interpolation=interpolation)(inputs) + + # First convolution block without batch normalization. + x = layers.Conv2D(filters=filters, kernel_size=7, strides=1, padding="same")(inputs) + x = layers.LeakyReLU(0.2)(x) + + # Second convolution block with batch normalization. + x = layers.Conv2D(filters=filters, kernel_size=1, strides=1, padding="same")(x) + x = layers.LeakyReLU(0.2)(x) + x = layers.BatchNormalization()(x) + + # Intermediate resizing as a bottleneck. + bottleneck = layers.Resizing(*TARGET_SIZE, interpolation=interpolation)(x) + + # Residual passes. + for _ in range(num_res_blocks): + x = res_block(bottleneck) + + # Projection. + x = layers.Conv2D( + filters=filters, kernel_size=3, strides=1, padding="same", use_bias=False + )(x) + x = layers.BatchNormalization()(x) + + # Skip connection. + x = layers.Add()([bottleneck, x]) + + # Final resized image. + x = layers.Conv2D(filters=3, kernel_size=7, strides=1, padding="same")(x) + final_resize = layers.Add()([naive_resize, x]) + + return tf.keras.Model(inputs, final_resize, name="learnable_resizer") + + +learnable_resizer = get_learnable_resizer() + + +""" +## Model building utility +""" + + +def get_model(): + backbone = tf.keras.applications.DenseNet121( + weights=None, + include_top=True, + classes=2, + input_shape=((TARGET_SIZE[0], TARGET_SIZE[1], 3)), + ) + backbone.trainable = True + + inputs = layers.Input((INP_SIZE[0], INP_SIZE[1], 3)) + x = layers.Rescaling(scale=1.0 / 255)(inputs) + x = learnable_resizer(x) + outputs = backbone(x) + + return tf.keras.Model(inputs, outputs) + + +""" +The structure of the learnable image resizer module allows for flexible integrations with +different vision models. 
+""" + +""" +## Compile and train our model with learnable resizer +""" + +model = get_model() +model.compile( + loss=keras.losses.CategoricalCrossentropy(label_smoothing=0.1), + optimizer="sgd", + metrics=["accuracy"], +) + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +model.fit(train_ds, validation_data=validation_ds, epochs=EPOCHS, verbose=2, callbacks=[TimeHistory(BATCH_SIZE,args.log_steps)]) + +""" +The plot shows that the visuals of the images have improved with training. The following +table shows the benefits of using the resizing module in comparison to using the bilinear +interpolation: + +| Model | Number of parameters (Million) | Top-1 accuracy | +|:-------------------------: |:-------------------------------: |:--------------: | +| With the learnable resizer | 7.051717 | 67.67% | +| Without the learnable resizer | 7.039554 | 60.19% | + +For more details, you can check out [this repository](https://github.com/sayakpaul/Learnable-Image-Resizing). +Note the above-reported models were trained for 10 epochs on 90% of the training set of +Cats and Dogs unlike this example. Also, note that the increase in the number of +parameters due to the resizing module is very negligible. To ensure that the improvement +in the performance is not due to stochasticity, the models were trained using the same +initial random weights. + +Now, a question worth asking here is - _isn't the improved accuracy simply a consequence +of adding more layers (the resizer is a mini network after all) to the model, compared to +the baseline?_ + +To show that it is not the case, the authors conduct the following experiment: + +* Take a pre-trained model trained some size, say (224 x 224). + +* Now, first, use it to infer predictions on images resized to a lower resolution. Record +the performance. + +* For the second experiment, plug in the resizer module at the top of the pre-trained +model and warm-start the training. Record the performance. 
+
+Now, the authors argue that using the second option is better because it helps the model
+learn how to adjust the representations better with respect to the given resolution.
+Since the results are purely empirical, a few more experiments such as analyzing the
+cross-channel interaction would have been even better. It is worth noting that elements
+like [Squeeze and Excitation (SE) blocks](https://arxiv.org/abs/1709.01507) and [Global Context (GC) blocks](https://arxiv.org/pdf/1904.11492) also add a few
+parameters to an existing network, but they are known to help a network process
+information in systematic ways to improve the overall performance.
+"""
+
+"""
+## Notes
+
+* To impose shape bias inside the vision models, Geirhos et al. trained them with a
+combination of natural and stylized images. It might be interesting to investigate if
+this learnable resizing module could achieve something similar, as the outputs seem to
+discard the texture information.
+
+* The resizer module can handle arbitrary resolutions and aspect ratios, which is very
+important for tasks like object detection and segmentation.
+
+* There is another closely related topic on ***adaptive image resizing*** that attempts
+to resize images/feature maps adaptively during training. [EfficientNetV2](https://arxiv.org/pdf/2104.00298)
+uses this idea.
+"""
diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..41666def8
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:PERFECT
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..662fa34ad
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,2 @@
+tensorflow>=2.4.0
+numpy
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..619ea5eca
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,158 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=64
+# Network name, same as the directory name
+Network="learnable_resizer_ID2518_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=5
+# Training steps
+train_steps=60000
+# Learning rate
+#learning_rate=1e-5
+
+# Parameter configuration
+data_path=""
+
+############ Debug/monitoring parameters ############
+precision_mode="allow_mix_precision"
+# Maintained parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump    # cur_path here is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+############ Debug/monitoring parameters ############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 learnable_resizer.py --data_path=$data_path \ + --epoch=$train_epochs \ + --static=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$9}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..32d6f14b8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,158 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=64 +#网络名称,同目录名称 +Network="learnable_resizer_ID2518_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=2 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 
learnable_resizer.py --data_path=$data_path \ + --epoch=$train_epochs \ + --static=False \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$9}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh new file mode 100644 index 000000000..0216a8edb --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh @@ -0,0 +1,168 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=64 +#网络名称,同目录名称 +Network="learnable_resizer_ID2518_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 
+#训练epoch,可选 +train_epochs=1 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 learnable_resizer.py --data_path=$data_path \ + --epoch=$train_epochs \ + --static=False \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$9}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' 
+ +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +ModelStatus="图执行FAIL" +DTS_Number="DTS2022010611495" +# error_msg="E19999" +error_msg="Optype \[Conv2DBackpropFilter\] of Ops kernel \[AIcoreEngine\] is unsupported." +Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh new file mode 100644 index 000000000..f753b1e10 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/learnable_resizer_ID2518_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh @@ -0,0 +1,158 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=64 +#网络名称,同目录名称 +Network="learnable_resizer_ID2518_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=3 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == 
--precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 learnable_resizer.py --data_path=$data_path \ + --epoch=$train_epochs \ + --static=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` +wait + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep val_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print$9}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From 4ee6127ff7bd1592ba4f489d4d2538f69e8ec2ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:19:31 +0000 Subject: [PATCH 43/54] =?UTF-8?q?mnist=5Fconvnet=5FID2524=5Ffor=5FTensorFl?= =?UTF-8?q?ow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 284 ++++++++++++++++++ .../README.md | 175 +++++++++++ .../mnist_convnet.py | 259 ++++++++++++++++ .../modelzoo_level.txt | 3 + .../npu_convert_dropout.py | 54 ++++ .../npu_ops.py | 256 ++++++++++++++++ .../requirements.txt | 0 .../run_1p.sh | 2 + .../test/train_full_1p.sh | 193 ++++++++++++ .../test/train_performance_1p.sh | 194 ++++++++++++ .../test/train_performance_1p_static.sh | 194 ++++++++++++ 11 files changed, 1614 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/mnist_convnet.py create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_convert_dropout.py create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_ops.py create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p_static.sh diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..f65d8b44c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/README.md
@@ -0,0 +1,175 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Preparation](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2021.10.01**
+
+**Size: 324KB**
+
+**Framework: TensorFlow 2.4.1**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Benchmark**
+
+**Description: Computer-vision network training code based on the TensorFlow framework**
+
+<h2 id="概述.md">Overview</h2>
+
+- An official Keras vision network.
+
+- Reference paper:
+    skip
+- Reference implementation:
+
+  [https://github.com/keras-team/keras-io/tree/master/examples/vision](https://github.com/keras-team/keras-io/tree/master/examples/vision)
+
+- Implementation adapted to the Ascend AI processor:
+
+  skip
+
+- To obtain the code for a specific commit_id via Git:
+
+    ```
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's repository directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # cd to the model code path; unnecessary if the repository contains only this model
+    ```
+
+## Default configuration
+- Network structure
+
+- Training hyperparameters (single card):
+    - Batch size: 128
+    - Train epochs: 15
+
+
+## Supported features
+
+| Feature | Supported |
+| ---------- | -------- |
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+
+## Mixed-precision training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators in the network to float16, improving system performance and reducing memory usage with very little loss of precision.
+
+## Enabling mixed precision
+Related code example.
+
+```
+config_proto = tf.ConfigProto(allow_soft_placement=True)
+custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = 'NpuOptimizer'
+custom_op.parameter_map["use_off_line"].b = True
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+session_config = npu_config_proto(config_proto=config_proto)
+```
+
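+Note: the `ConfigProto` snippet above uses the TF1.x-style session configuration and is
+kept for reference. The TF2.x sample in this package enables mixed precision through
+`npu_device` instead, as mnist_convnet.py below does:
+
+```
+import npu_device
+npu_device.global_options().precision_mode = "allow_mix_precision"
+npu_device.open().as_default()
+```
+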
+<h2 id="训练环境准备.md">Training Environment Preparation</h2>
+
+- For hardware and runtime environment preparation, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. The model is trained on the MNIST dataset (mnist.npz); users need to obtain it on their own.
+2. After downloading, place mnist.npz in the directory that will be passed via --data_path.
+
+## Model training
+- Click "Download now" and choose a suitable way to download the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables related to program execution.
+
+       For the environment variable settings, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+    2. Single-card training
+
+       2.1 Set the single-card training parameters (script: mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p.sh), for example:
+
+
+        ```
+        batch_size=128
+        # training epochs
+        train_epochs=15
+        ```
+
+       2.2 Single-card training command (scripts under mnist_convnet_ID2524_for_TensorFlow2.X/test)
+
+        ```
+        Run export ASCEND_DEVICE_ID=0 (0-7) in the terminal to select the card used for single-card training
+        bash train_full_1p.sh --data_path=xx
+        The dataset should have the following structure (the data split may differ)
+        |
+        ├─mnist.npz
+
+        ```
+
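+For reference, this is (condensed) how mnist_convnet.py locates the dataset under
+--data_path:
+
+```
+import os
+import numpy as np
+
+def load_data(data_dir):
+    # Expects <data_dir>/mnist.npz, the standard Keras MNIST archive.
+    path = os.path.join(data_dir, "mnist.npz")
+    with np.load(path, allow_pickle=True) as f:
+        return (f["x_train"], f["y_train"]), (f["x_test"], f["y_test"])
+```
+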
+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+    1. Obtain the data.
+       See "Dataset preparation" under "Quick Start".
+
+- Model training
+
+    Refer to the "Quick Start" section.
+
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+
+    ├── README.md                                 // documentation
+    ├── requirements.txt                          // dependencies
+    ├── mnist_convnet.py
+    ├── npu_convert_dropout.py
+    ├── npu_ops.py
+    ├── test
+    |    |—— train_full_1p.sh                     // single-card full-training (accuracy) script
+    |    |—— train_performance_1p.sh              // single-card performance script
+
+## Script parameters
+
+```
+batch_size         training batch size
+train_epochs       total number of training epochs
+Defaults for the remaining flags are configured in parse_args() in mnist_convnet.py
+```
+
+## Training process
+
+Start single-card training with the training command described in "Model training".
+Set data_path in the training script (train_full_1p.sh) to the path of the training dataset. For the detailed procedure, see the "Model training" example.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/mnist_convnet.py b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/mnist_convnet.py
new file mode 100644
index 000000000..9d955f89b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/mnist_convnet.py
@@ -0,0 +1,259 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Simple MNIST convnet
+Author: [fchollet](https://twitter.com/fchollet)
+Date created: 2015/06/19
+Last modified: 2020/04/21
+Description: A simple convnet that achieves ~99% test accuracy on MNIST.
+""" + +""" +## Setup +""" +import os +import ast +import argparse +import numpy as np +import tensorflow as tf +from tensorflow import keras +from tensorflow.keras import layers +import npu_convert_dropout +import npu_device +import time +# npu_device.open().as_default() + +""" +## Prepare the data +""" +def parse_args(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default='./', + help="""directory to data""") + parser.add_argument('--batch_size', default=64, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=50, type=int, + help="""epochs""") + parser.add_argument("--log_steps", default=50, type=int, + help="TimeHis log Step.") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str, + help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, + help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str, + help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, + help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest="auto_tune", type=ast.literal_eval, + help='auto_tune flag') + parser.add_argument('--static', dest="static", type=ast.literal_eval, + help="""judgement dynamic or static shape""") + ''' + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + ''' + return parser.parse_args() + +args = parse_args() +#===============================NPU Migration========================================= +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + 
'", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist="../configs/"+args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file="../configs/"+args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= +npu_config() +data_dir = args.data_path + +# Model / data parameters +num_classes = 10 +input_shape = (28, 28, 1) + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + +# the data, split between train and test sets +#(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() +def load_data(data_dir): + origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/' + dirname = 'mnist.npz' + path = os.path.join(data_dir, dirname) + with np.load(path, allow_pickle=True) as f: + x_train, y_train = f['x_train'], f['y_train'] + x_test, y_test = f['x_test'], f['y_test'] + return (x_train, y_train), (x_test, y_test) +(x_train, y_train), (x_test, y_test) = load_data(data_dir) + +#if args.eval_static: +# x_train = x_train[:59968] +# y_train = y_train[:59968] +# x_test = x_test[:9984] +# y_test = y_test[:9984] + +# Scale images to the [0, 1] range +x_train = x_train.astype("float32") / 255 +x_test = x_test.astype("float32") / 255 +# Make sure images have shape (28, 28, 1) +x_train = np.expand_dims(x_train, -1) +x_test = np.expand_dims(x_test, -1) +print("x_train shape:", x_train.shape) +print(x_train.shape[0], "train samples") +print(x_test.shape[0], "test samples") + + +# convert class vectors to binary class matrices +y_train = keras.utils.to_categorical(y_train, num_classes) +y_test = keras.utils.to_categorical(y_test, num_classes) + +""" +## Build the model +""" + +model = keras.Sequential( + [ + keras.Input(shape=input_shape), + layers.Conv2D(32, 
kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(num_classes, activation="softmax"), + ] +) + +model.summary() + +""" +## Train the model +""" + +#batch_size = 128 +batch_size = args.batch_size +#epochs = 15 +epochs = args.epochs + +model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) +if args.static: + train_ds = (tf.data.Dataset.from_tensor_slices((x_train, y_train)) + .shuffle(args.batch_size) + .batch(args.batch_size, drop_remainder=args.static) + ) + model.fit(train_ds, batch_size=batch_size, epochs=epochs, callbacks=[TimeHistory(batch_size,844)], verbose=2) +else: + model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=[TimeHistory(batch_size,844)], verbose=2) +#model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) +model.save_weights(filepath="mnist_convnet", save_format="tf") + + +## Evaluate the trained model + + +score = model.evaluate(x_test, y_test, verbose=0) +print("Test loss:", score[0]) +print("Test accuracy:", score[1]) diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_convert_dropout.py b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_convert_dropout.py new file mode 100644 index 000000000..95f8689ce --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_convert_dropout.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from keras import backend +from keras.utils import control_flow_util +from keras.layers.core import Dropout +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import nn +import npu_ops + +def dropout_call(self, inputs, training=None): + """Make Keras Dropout to execute NPU dropout""" + if training is None: + training = backend.learning_phase() + + def dropped_inputs(): + return npu_ops.dropout( + inputs, + noise_shape=self._get_noise_shape(inputs), + seed=self.seed, + keep_prob=1 - self.rate) + + output = control_flow_util.smart_cond(training, + dropped_inputs, + lambda : array_ops.identity(inputs)) + + return output + +Dropout.call = dropout_call diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_ops.py b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_ops.py new file mode 100644 index 000000000..fa6f8f211 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/npu_ops.py @@ -0,0 +1,256 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+
+"""Wrappers and registered gradients for NPU (Ascend) ops: dropout,
+LSTM/GRU cells and fused optimizer helpers."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+
+import numbers
+from tensorflow.python.ops import array_ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import ops
+from tensorflow.python.eager import context
+
+from npu_device import gen_npu_ops
+
+
+DEFAULT_GRAPH_SEED = 87654321
+_MAXINT32 = 2**31 - 1
+
+def LARSV2(input_weight,
+           input_grad,
+           weight_decay,
+           learning_rate,
+           hyperpara=0.001,
+           epsilon=0.00001,
+           use_clip=False,
+           name=None):
+  if context.executing_eagerly():
+    raise RuntimeError("tf.LARSV2() is not compatible with "
+                       "eager execution.")
+
+  return gen_npu_ops.lars_v2(input_weight=input_weight,
+                             input_grad=input_grad,
+                             weight_decay=weight_decay,
+                             learning_rate=learning_rate,
+                             hyperpara=hyperpara,
+                             epsilon=epsilon,
+                             use_clip=use_clip,
+                             name=name)
+
+
+def _truncate_seed(seed):
+  return seed % _MAXINT32  # truncate to fit into a 32-bit integer
+
+def get_seed(op_seed):
+  global_seed = ops.get_default_graph().seed
+
+  if global_seed is not None:
+    if op_seed is None:
+      op_seed = ops.get_default_graph()._last_id
+
+    seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
+  else:
+    if op_seed is not None:
+      seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
+    else:
+      seeds = None, None
+  # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
+  # be unexpected since Python docs say nondeterminism is (None, None).
+  if seeds == (0, 0):
+    return (0, _MAXINT32)
+  return seeds
+
+def _get_noise_shape(x, noise_shape):
+  # If noise_shape is None, return the shape of x immediately.
+  if noise_shape is None:
+    return array_ops.shape(x)
+
+  try:
+    # Best effort to figure out the intended shape.
+    # If not possible, let the op handle it.
+    # In eager mode an exception will show up.
+    noise_shape_ = tensor_shape.as_shape(noise_shape)
+  except (TypeError, ValueError):
+    return noise_shape
+
+  if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
+    new_dims = []
+    for i, dim in enumerate(x.shape.dims):
+      if noise_shape_.dims[i].value is None and dim.value is not None:
+        new_dims.append(dim.value)
+      else:
+        new_dims.append(noise_shape_.dims[i].value)
+    return tensor_shape.TensorShape(new_dims)
+
+  return noise_shape
+
+def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
+  """NPU dropout: keep each element of `x` with probability `keep_prob`.
+
+  Args:
+    x: A floating point tensor.
+    keep_prob: A scalar float or tensor: the probability that each element
+      is kept.
+    noise_shape: A 1-D int32 tensor, the shape of the randomly generated
+      keep/drop mask.
+    seed: Random seed.
+    name: Op name.
+
+  Returns:
+    A tensor of the same shape as `x`.
+  """
+  if context.executing_eagerly():
+    raise RuntimeError("tf.dropout() is not compatible with "
+                       "eager execution.")
+  x = ops.convert_to_tensor(x, name="x")
+  if not x.dtype.is_floating:
+    raise ValueError("x has to be a floating point tensor since it's going to"
+                     " be scaled. Got a %s tensor instead." % x.dtype)
+  if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
+    raise ValueError("keep_prob must be a scalar tensor or a float in the "
+                     "range (0, 1], got %g" % keep_prob)
+  if isinstance(keep_prob, float) and keep_prob == 1:
+    return x
+  seed, seed2 = get_seed(seed)
+  noise_shape = _get_noise_shape(x, noise_shape)
+  gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name)
+  result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name)
+  return result
+
+@ops.RegisterGradient("DropOutDoMask")
+def _DropOutDoMaskGrad(op, grad):
+  result = gen_npu_ops.drop_out_do_mask(grad, op.inputs[1], op.inputs[2])
+  return [result, None, None]
+
+def basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple,
+                    activation, name=None):
+  if context.executing_eagerly():
+    raise RuntimeError("tf.basic_lstm_cell() is not compatible with "
+                       "eager execution.")
+  x = ops.convert_to_tensor(x, name="x")
+  h = ops.convert_to_tensor(h, name="h")
+  c = ops.convert_to_tensor(c, name="c")
+  w = ops.convert_to_tensor(w, name="w")
+  b = ops.convert_to_tensor(b, name="b")
+  result = gen_npu_ops.basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple,
+                                       activation, name)
+  return result
+
+@ops.RegisterGradient("BasicLSTMCell")
+def basic_lstm_cell_grad(op, dct, dht, dit, djt, dft, dot, dtanhct):
+
+  dgate, dct_1 = gen_npu_ops.basic_lstm_cell_c_state_grad(op.inputs[2], dht, dct, op.outputs[2], op.outputs[3], op.outputs[4], op.outputs[5], op.outputs[6], forget_bias=op.get_attr("forget_bias"), activation=op.get_attr("activation"))
+  dw, db = gen_npu_ops.basic_lstm_cell_weight_grad(op.inputs[0], op.inputs[1], dgate)
+  dxt, dht = gen_npu_ops.basic_lstm_cell_input_grad(dgate, op.inputs[3], keep_prob=op.get_attr("keep_prob"))
+
+  return [dxt, dht, dct_1, dw, db]
+
+def adam_apply_one_assign(input0, input1, input2, input3, input4,
+                          mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name=None):
+  if context.executing_eagerly():
+    raise RuntimeError("tf.adam_apply_one_assign() is not compatible with "
+                       "eager execution.")
+  result = gen_npu_ops.adam_apply_one_assign(input0, input1, input2, input3, input4,
+                                             mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name)
+  return result
+
+def adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4,
+                                     mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name=None):
+  if context.executing_eagerly():
+    raise RuntimeError("tf.adam_apply_one_with_decay_assign() is not compatible with "
+                       "eager execution.")
+  result = gen_npu_ops.adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4,
+                                                        mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name)
+  return result
+
+@ops.RegisterGradient("DynamicGruV2")
+def dynamic_gru_v2_grad(op, dy, doutput_h, dupdate, dreset, dnew, dhidden_new):
+  (x, weight_input, weight_hidden, bias_input, bias_hidden, seq_length, init_h) = op.inputs
+  (y, output_h, update, reset, new, hidden_new) = op.outputs
+  (dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev) = gen_npu_ops.dynamic_gru_v2_grad(x, weight_input, weight_hidden, y, init_h, output_h, dy, doutput_h, update, reset, new, hidden_new, direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), gate_order=op.get_attr("gate_order"), reset_after=op.get_attr("reset_after"))
+
+  return (dx, dw_input, dw_hidden, db_input, db_hidden, seq_length, dh_prev)
+
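+# ---------------------------------------------------------------------------
+# Illustrative sketch (added commentary; not part of the original file and
+# never called by it): how the graph-mode `dropout` above is typically used,
+# mirroring what npu_convert_dropout.py does when it patches keras Dropout.
+# The shape comment and keep_prob are made-up example values.
+def _example_dropout_usage(features):
+  """features: a float32 graph-mode tensor, e.g. of shape (batch, 128)."""
+  # keep_prob is the complement of the Keras rate: Dropout(rate=0.4)
+  # corresponds to keep_prob=0.6 here. The call raises RuntimeError under
+  # eager execution, so it may only be traced inside a graph / tf.function.
+  return dropout(features, keep_prob=0.6, seed=DEFAULT_GRAPH_SEED)
+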
+@ops.RegisterGradient("DynamicRnn")
+def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc):
+  (x, w, b, seq_length, init_h, init_c) = op.inputs
+  (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs
+  (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, cell_type=op.get_attr("cell_type"), direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), use_peephole=op.get_attr("use_peephole"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), forget_bias=op.get_attr("forget_bias"))
+
+  return (dx, dw, db, seq_length, dh_prev, dc_prev)
+
+def lamb_apply_optimizer_assign(input0, input1, input2, input3, mul0_x, mul1_x, mul2_x,
+                                mul3_x, add2_y, steps, do_use_weight, weight_decay_rate, name=None):
+  if context.executing_eagerly():
+    raise RuntimeError("tf.lamb_apply_optimizer_assign() is not compatible with eager execution")
+  update, nextv, nextm = gen_npu_ops.lamb_apply_optimizer_assign(input0, input1, input2, input3, mul0_x, mul1_x, mul2_x,
+                                                                 mul3_x, add2_y, steps, do_use_weight, weight_decay_rate, name)
+  return update, nextv, nextm
+
+def lamb_apply_weight_assign(input0, input1, input2, input3, input4, name=None):
+  if context.executing_eagerly():
+    raise RuntimeError("tf.lamb_apply_weight_assign() is not compatible with eager execution")
+  result = gen_npu_ops.lamb_apply_weight_assign(input0, input1, input2, input3, input4, name)
+  return result
+
+def dropout_v3(x, keep_prob, noise_shape=None, seed=None, name=None):
+  """NPU dropout (v3 mask kernels): keep each element of `x` with
+  probability `keep_prob`.
+
+  Args:
+    x: A floating point tensor.
+    keep_prob: A scalar float or tensor: the probability that each element
+      is kept.
+    noise_shape: A 1-D int32 tensor, the shape of the randomly generated
+      keep/drop mask.
+    seed: Random seed.
+    name: Op name.
+
+  Returns:
+    A tensor of the same shape as `x`.
+  """
+  x = ops.convert_to_tensor(x, name="x")
+  if not x.dtype.is_floating:
+    raise ValueError("x has to be a floating point tensor since it's going to"
+                     " be scaled. Got a %s tensor instead." % x.dtype)
+
+  if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
+    raise ValueError("keep_prob must be a scalar tensor or a float in the "
+                     "range (0, 1], got %g" % keep_prob)
+
+  if isinstance(keep_prob, float) and keep_prob == 1:
+    return x
+
+  seed, seed2 = get_seed(seed)
+  noise_shape = _get_noise_shape(x, noise_shape)
+  gen_out = gen_npu_ops.drop_out_gen_mask_v3(noise_shape, keep_prob, seed, seed2, name)
+  result = gen_npu_ops.drop_out_do_mask_v3(x, gen_out, keep_prob, name)
+  return result
+
+@ops.RegisterGradient("DropOutDoMaskV3")
+def _DropOutDoMaskV3Grad(op, grad):
+  result = gen_npu_ops.drop_out_do_mask_v3(grad, op.inputs[1], op.inputs[2])
+  return [result, None, None]
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..00e54b0e3
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/run_1p.sh
+cur_path=`pwd`
+python3 ${cur_path}/mnist_convnet.py > loss+perf_gpu.txt 2>&1
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..2e80a6a89
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_full_1p.sh
+#!/bin/bash
+# Current path; do not modify
+cur_path=`pwd`/../
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=64
+# Network name, same as the directory name
+Network="mnist_convnet_ID2524_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RankSize=1
+# Training epochs (optional)
+train_epochs=50
+# Training steps
+#train_steps=5
+# Learning rate
+#learning_rate=1e-4
+#ckpt_path=""
+# Parameter configuration
+data_path=""
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no need to modify below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=${cur_path}/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file=${cur_path}/../configs/ops_info.json
+fusion_off_flag=False
+auto_tune=False
+fusion_off_file=${cur_path}/../configs/fusion_switch.cfg
+############ Diagnostic parameters ##############
+############ Diagnostic parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+############ Diagnostic parameters ##############
+
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage:./train_full_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+    if [[ $para == --ckpt_path* ]];then
+        ckpt_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path/
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 mnist_convnet.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2etime=$(( $end - $start ))
+
+# Print results; do not modify
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+#Time=`grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | grep -v 'ETA' | awk '{print $5}' | tr -d ms/step | tail -n 2 | awk '{sum+=$1} END {print sum/NR}'`
+# Output FPS
+#FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'`
+
+# Loss and performance are not logged continuously, so only the last loss/performance value of each epoch is taken
+Step=`grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'`
+Time=`grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'`
+TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+#train_accuracy=`grep accuracy: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'ETA' | awk '{print $11}' | awk '{sum+=$1} END {print sum/NR}'`
+train_accuracy=`grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $9}' | awk '{sum+=$1} END {print sum/NR}'`
+# Print; do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2etime"
+
+### The fields below are used for smoke-test monitoring
+BatchSize=${batch_size}
+# Device type, obtained automatically
+DeviceType=`uname -m`
+# Case name, generated automatically
+CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'acc'
+
+## Collect performance data; do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+#TrainingTime=${Time}
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+#grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | awk '{print $8}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $(NF-9)}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; do not modify
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+## Collect error information
+# System error message
+#ModelStatus="graph execution FAIL"
+# error_msg="EZ3002"
+# Check whether the error message matches the historical state; no modification needed here
+#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+# DTS ticket number or issue link
+#DTS_Number="DTS2021090622224"
+
+# Print key information into ${CaseName}.log; no modification needed here
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RankSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..966a742d3
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p.sh
+#!/bin/bash
+# Current path; do not modify
+cur_path=`pwd`/../
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=64
+# Network name, same as the directory name
+Network="mnist_convnet_ID2524_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RankSize=1
+# Training epochs (optional)
+train_epochs=3
+# Training steps
+#train_steps=5
+# Learning rate
+#learning_rate=1e-4
+#ckpt_path=""
+# Parameter configuration
+data_path=""
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no need to modify below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=${cur_path}/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file=${cur_path}/../configs/ops_info.json
+fusion_off_flag=False
+auto_tune=False
+fusion_off_file=${cur_path}/../configs/fusion_switch.cfg
+############ Diagnostic parameters ##############
+############ Diagnostic parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+############ Diagnostic parameters ##############
+
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage:./train_performance_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+    if [[ $para == --ckpt_path* ]];then
+        ckpt_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path/
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 mnist_convnet.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --static=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2etime=$(( $end - $start ))
+
+# Print results; do not modify
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+#Time=`grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | grep -v 'ETA' | awk '{print $5}' | tr -d ms/step | tail -n 2 | awk '{sum+=$1} END {print sum/NR}'`
+# Output FPS
+#FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'`
+
+# Loss and performance are not logged continuously, so only the last loss/performance value of each epoch is taken
+Step=`grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'`
+Time=`grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tail -n +2 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'`
+TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and adjust per model
+#train_accuracy=`grep accuracy: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'ETA' | awk '{print $11}' | awk '{sum+=$1} END {print sum/NR}'`
+train_accuracy=`grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $9}' | awk '{sum+=$1} END {print sum/NR}'`
+# Print; do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
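+# Note (added commentary, not from a measured run): the awk field indices used
+# above assume the Keras verbose=2 epoch summary format, e.g.
+#   844/844 - 3s - loss: 0.0450 - accuracy: 0.9860 - val_loss: 0.0330 - val_accuracy: 0.9910
+# so $1 is the step counter ("844/844"), $3 the epoch time ("3s"), $9 the
+# training accuracy, and $(NF-9) the loss. Re-check these indices if the
+# Keras log layout changes.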
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2etime"
+
+### The fields below are used for smoke-test monitoring
+BatchSize=${batch_size}
+# Device type, obtained automatically
+DeviceType=`uname -m`
+# Case name, generated automatically
+CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'perf'
+
+## Collect performance data; do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+#TrainingTime=${Time}
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+#grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | awk '{print $8}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+grep 844/844 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $(NF-9)}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; do not modify
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+## Collect error information
+# System error message
+#ModelStatus="graph execution FAIL"
+# error_msg="EZ3002"
+# Check whether the error message matches the historical state; no modification needed here
+#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+# DTS ticket number or issue link
+#DTS_Number="DTS2021090622224"
+
+# Print key information into ${CaseName}.log; no modification needed here
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RankSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p_static.sh
new file mode 100644
index 000000000..db30dc3ac
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/mnist_convnet_ID2524_for_TensorFlow2.X/test/train_performance_1p_static.sh
+#!/bin/bash
+# Current path; do not modify
+cur_path=`pwd`/../
+
+# Basic parameters; review and adjust per model
+# Batch size
+batch_size=64
+# Network name, same as the directory name
+Network="mnist_convnet_ID2524_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for a single card
+RankSize=1
+# Training epochs (optional)
+train_epochs=3
+# Training steps
+#train_steps=5
+# Learning rate
+#learning_rate=1e-4
+#ckpt_path=""
+# Parameter configuration
+data_path=""
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no need to modify below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=${cur_path}/overflow_dump
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file=${cur_path}/../configs/ops_info.json
+fusion_off_flag=False
+auto_tune=False
+fusion_off_file=${cur_path}/../configs/fusion_switch.cfg
+############ Diagnostic parameters ##############
+############ Diagnostic parameters ##############
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+############ Diagnostic parameters ##############
+
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage:./train_performance_1p_static.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+    if [[ $para == --ckpt_path* ]];then
+        ckpt_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+############## Run training ##########
+cd $cur_path/
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 mnist_convnet.py --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --static=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2etime=$(( $end - $start ))
+
+# Print results; do not modify
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+#Time=`grep loss: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | grep -v 'ETA' | awk '{print $5}' | tr -d ms/step | tail -n 2 | awk '{sum+=$1} END {print sum/NR}'`
+# Output FPS
+#FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'`
+
+# Loss and performance are not logged continuously, so only the last loss/performance value of each epoch is taken
+Step=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v Test | tail -n +2 | awk '{print $1}' | awk -F "/" '{print $1}' |awk '{sum+=$1} END {print sum/NR}'`
+Time=`grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v Test | tail -n +2 | awk '{print $3}' | tr -d s | awk '{sum+=$1} END {print sum/NR}'`
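+# Note (added commentary): the looser `grep loss ... | grep -v Test` filter
+# above, instead of the `grep 844/844` used by the dynamic-shape scripts, is
+# presumably needed because the static run batches the full training set with
+# drop_remainder=True, so the steps-per-epoch counter is not 844/844.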
+TrainingTime=`awk 'BEGIN{printf "%.6f\n",'${Time}'/'${Step}'}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'`
+# Print; do not modify
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy (taken here from the final "Test accuracy" line); review per model
+#train_accuracy=`grep accuracy: $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'ETA' | awk '{print $11}' | awk '{sum+=$1} END {print sum/NR}'`
+train_accuracy=`grep "Test accuracy" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $3}' | awk '{sum+=$1} END {print sum/NR}'`
+# Print; do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+
+#echo "Final Performance ms/step : $average_perf"
+echo "Final Training Duration sec : $e2etime"
+
+### The fields below are used for smoke-test monitoring
+BatchSize=${batch_size}
+# Device type, obtained automatically
+DeviceType=`uname -m`
+# Case name, generated automatically
+CaseName=${Network}_bs${BatchSize}_${RankSize}'p'_'perf'
+
+## Collect performance data; do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+#TrainingTime=${Time}
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+#grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | awk '{print $8}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | grep -v 'Test' | awk '{print $9}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; do not modify
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+## Collect error information
+# System error message
+#ModelStatus="graph execution FAIL"
+# error_msg="EZ3002"
+# Check whether the error message matches the historical state; no modification needed here
+#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel"
+#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l`
+# DTS ticket number or issue link
+#DTS_Number="DTS2021090622224"
+
+# Print key information into ${CaseName}.log; no modification needed here
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RankSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
--
Gitee

From 17fafb29694db9c635d6a9eb121450186eb9a235 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:19:55 +0000
Subject: [PATCH 44/54] =?UTF-8?q?Oct-ResNet=5FID2890=5Ffor=5FTensorFlow2.X?=
 =?UTF-8?q?=E7=A7=BB=E4=BB=93?=
MIME-Version: 1.0
Content-Type:
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 284 +++++++++++ .../README.md | 237 +++++++++ .../ResNet29v2.pdf | Bin 0 -> 40660 bytes .../keras_resnet.py | 449 ++++++++++++++++++ .../model/__init__.py | 29 ++ .../model/resnet.py | 363 ++++++++++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../score.py | 76 +++ .../test/train_full_1p.sh | 213 +++++++++ .../test/train_performance_1p.sh | 213 +++++++++ .../train.py | 358 ++++++++++++++ 12 files changed, 2225 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/ResNet29v2.pdf create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/keras_resnet.py create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/__init__.py create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/resnet.py create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/score.py create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/train.py diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..e19ca1c3f
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/README.md
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.4.8**
+
+**Size: 210KB**
+
+**Framework: TensorFlow_2.6**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code for the Octave Convolution network, based on the TensorFlow framework**
+
+<h2 id="概述.md">Overview</h2>
+
+- OctConv (**Octave Convolution**) is a plug-and-play convolution unit that can directly replace a regular convolution without any change to the network architecture. It stores and processes feature maps whose spatial variation is "slow" at a lower spatial resolution, which reduces memory and compute cost.
+
+- Reference paper:
+
+  https://arxiv.org/abs/1904.05049
+
+- Reference implementation:
+
+  https://github.com/tuanzhangCS/octconv_resnet
+
+- Implementation adapted to the Ascend AI processor:
+
+  skip
+
+- To obtain the code for a given commit_id via Git:
+
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's repository directory
+    git checkout {branch}             # switch to the branch
+    git reset --hard {commit_id}      # reset the code to the commit_id
+    cd {code_path}                    # enter the model code path; skip if the repository contains only this model
+
+
+## Default configuration
+- Network structure
+  - ResNet v1
+  - Stacks of 2 x (3 x 3) Conv2D-BN-ReLU; the last ReLU is applied after the shortcut connection.
+    At the beginning of each stage the feature map size is halved (downsampled)
+    by a convolutional layer with strides=2, while the number of filters is
+    doubled. Within each stage, the layers have the same number of filters and
+    the same feature map size.
+    Feature map sizes:
+    stage 0: 32x32, 16
+    stage 1: 16x16, 32
+    stage 2: 8x8, 64
+    The number of parameters is 0.27M.
+
+- Training hyperparameters (single card):
+  - Batch size: 32
+  - Train epochs: 200
+  - block_num: 3
+
+
+## Supported features
+
+| Feature | Supported |
+|-------|------|
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+
+## Mixed precision training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, selected float32 operators across the network are automatically lowered to float16, which improves system performance and reduces memory usage with very little loss of precision.
+
+## Enabling mixed precision
+The launch scripts enable mixed precision by default, i.e. precision_mode='allow_mix_precision'.
+
+```
+ ./train_performance_1p.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    --max_step               # of step for training
+    --learning_rate          learning rate
+    --batch                  batch size
+    --modeldir               model dir
+    --save_interval          save interval for ckpt
+    --loss_scale             enable loss scale ,default is False
+    -h/--help                show help message
+```
+
+Related code sample:
+
+```
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(args.precision_mode)
+```
+
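+A minimal TF2-style sketch of where this setting is usually applied in these samples, before any model is built (assumption: this mirrors the `npu_device` plugin usage found elsewhere in this repository; the `custom_op` line above belongs to the TF1-style session configuration):
+
+```
+import npu_device
+
+# Select the precision policy parsed from the command line, then bring the
+# NPU up so that subsequent Keras graphs are placed on it.
+npu_device.global_options().precision_mode = args.precision_mode
+npu_device.open().as_default()
+```
+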

+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+1. For hardware environment and runtime environment setup, see the "[CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)".
+2. Docker needs to be installed on the host, and the image is obtained by logging in to the [Ascend Hub](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm).
+
+   The images supported by this model are listed in Table 1.
+
+   **Table 1** Image list
+
+   | Image name | Image version | Compatible CANN version |
+   | ---------- | ------------- | ----------------------- |
+   | [ascend-tensorflow-arm](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) | 21.09 | 20.2 |
+
+3. Run the following command to install the dependencies.
+```
+pip install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+

+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Prepare the training and validation datasets yourself; the model is trained on the CIFAR-10 dataset.
+
+2. Place the dataset according to the reference layout below.
+
+Reference dataset directory:
+
+```
+├─data
+│   └─cifar-10-batches-py
+│       ├──batches.meta
+│       ├──data_batch_1
+│       ├──data_batch_2
+│       ├──data_batch_3
+│       ├──data_batch_4
+│       ├──data_batch_5
+│       ├──readme.html
+│       └─test_batch
+```
+## Model training
+- Click "Download now" and choose a suitable way to download the source code package.
+- Start training.
+
+  1. Before launching training, configure the environment variables related to program execution.
+
+     For the environment variable configuration, see:
+
+     [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+  2. Single-card training
+
+     2.1 Single-card training command (the script is test/train_full_1p.sh). Make sure "--data_path" in the example below is changed to the user's data path; here the data folder is placed under the home directory. Training enables mixed precision by default, i.e. precision_mode='allow_mix_precision'.
+
+        bash train_full_1p.sh --data_path=/home/data
+
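+A fuller invocation with the optional debug switches these scripts parse (illustrative values; each switch defaults as described under "Enabling mixed precision" above):
+
+```
+bash train_full_1p.sh --data_path=/home/data \
+                      --precision_mode=allow_mix_precision \
+                      --over_dump=False \
+                      --profiling=False
+```
+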

+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+
+```
+|--model                             # network structure directory
+|   |--__init__.py
+|   |--resnet.py
+|--test                              # training script directory
+|   |--train_full_1p.sh
+|   |--train_performance_1p.sh
+|--train.py                          # network training script
+|--......
+```
+
+## Script parameters
+
+```
+parser.add_argument('--data_dir', default="../", help="""directory to data""")
+parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""")
+parser.add_argument('--train_epochs', default=1, type=int, help="""epochs""")
+parser.add_argument("--block_num", default=3, type=int, help="num for res blocks in each stack.")
+parser.add_argument("--model_version", default=1, type=int, help="Model version Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2).")
+
+#===============================NPU Migration=========================================
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, help='autotune, default is False')
+```
+
+## Training process
+
+Start single-card or multi-card training with the commands described in "Model training". Single-card and multi-card runs use different scripts; single-card and 8-card network training are supported. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training log and checkpoint files. Taking 8-card training as an example, the loss information is written to ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/ResNet29v2.pdf b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/ResNet29v2.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..5858f7b256eadf56ffaed5ef453d4ec39b631ffc
GIT binary patch
literal 40660
[40660 bytes of base85-encoded binary PDF data (ResNet29v2.pdf) omitted]
diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/ResNet29v2.pdf b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/ResNet29v2.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..5858f7b256eadf56ffaed5ef453d4ec39b631ffc
GIT binary patch
[literal 40660: binary data for ResNet29v2.pdf omitted]
[truncated: the tail of the binary patch and the header of the next file diff, including the opening of the Oct-ResNet CIFAR-10 training script and the first lines of its lr_schedule() function; the schedule below resumes from the canonical Keras example, whose base rate is lr = 1e-3]
+def lr_schedule(epoch):
+    lr = 1e-3
+    if epoch > 180:
+        lr *= 0.5e-3
+    elif epoch > 160:
+        lr *= 1e-3
+    elif epoch > 120:
+        lr *= 1e-2
+    elif epoch > 80:
+        lr *= 1e-1
+    print('Learning rate: ', lr)
+    return lr
+
+
+def resnet_layer(inputs,
+                 num_filters=16,
+                 kernel_size=3,
+                 strides=1,
+                 activation='relu',
+                 batch_normalization=True,
+                 conv_first=True):
+    """2D Convolution-Batch Normalization-Activation stack builder
+
+    # Arguments
+        inputs (tensor): input tensor from input image or previous layer
+        num_filters (int): Conv2D number of filters
+        kernel_size (int): Conv2D square kernel dimensions
+        strides (int): Conv2D square stride dimensions
+        activation (string): activation name
+        batch_normalization (bool): whether to include batch normalization
+        conv_first (bool): conv-bn-activation (True) or
+            bn-activation-conv (False)
+
+    # Returns
+        x (tensor): tensor as input to the next layer
+    """
+    conv = Conv2D(num_filters,
+                  kernel_size=kernel_size,
+                  strides=strides,
+                  padding='same',
+                  kernel_initializer='he_normal',
+                  kernel_regularizer=l2(1e-4))
+
+    x = inputs
+    if conv_first:
+        x = conv(x)
+        if batch_normalization:
+            x = BatchNormalization()(x)
+        if activation is not None:
+            x = Activation(activation)(x)
+    else:
+        if batch_normalization:
+            x = BatchNormalization()(x)
+        if activation is not None:
+            x = Activation(activation)(x)
+        x = conv(x)
+    return x
+
+
+def resnet_v1(input_shape, depth, num_classes=10):
+    """ResNet Version 1 Model builder [a]
+
+    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
+    Last ReLU is after the shortcut connection.
+    At the beginning of each stage, the feature map size is halved (downsampled)
+    by a convolutional layer with strides=2, while the number of filters is
+    doubled. Within each stage, the layers have the same number of filters and
+    the same feature map sizes.
+    Feature map sizes:
+    stage 0: 32x32, 16
+    stage 1: 16x16, 32
+    stage 2: 8x8, 64
+    The number of parameters is approximately the same as Table 6 of [a]:
+    ResNet20 0.27M
+    ResNet32 0.46M
+    ResNet44 0.66M
+    ResNet56 0.85M
+    ResNet110 1.7M
+
+    # Arguments
+        input_shape (tensor): shape of input image tensor
+        depth (int): number of core convolutional layers
+        num_classes (int): number of classes (CIFAR10 has 10)
+
+    # Returns
+        model (Model): Keras model instance
+    """
+    if (depth - 2) % 6 != 0:
+        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
+    # Start model definition.
+    num_filters = 16
+    num_res_blocks = int((depth - 2) / 6)
+
+    inputs = Input(shape=input_shape)
+    x = resnet_layer(inputs=inputs)
+    # Instantiate the stack of residual units
+    for stack in range(3):
+        for res_block in range(num_res_blocks):
+            strides = 1
+            if stack > 0 and res_block == 0:  # first layer but not first stack
+                strides = 2  # downsample
+            y = resnet_layer(inputs=x,
+                             num_filters=num_filters,
+                             strides=strides)
+            y = resnet_layer(inputs=y,
+                             num_filters=num_filters,
+                             activation=None)
+            if stack > 0 and res_block == 0:  # first layer but not first stack
+                # linear projection residual shortcut connection to match
+                # changed dims
+                x = resnet_layer(inputs=x,
+                                 num_filters=num_filters,
+                                 kernel_size=1,
+                                 strides=strides,
+                                 activation=None,
+                                 batch_normalization=False)
+            x = keras.layers.add([x, y])
+            x = Activation('relu')(x)
+        num_filters *= 2
+
+    # Add classifier on top.
+    # v1 does not use BN after last shortcut connection-ReLU
+    x = AveragePooling2D(pool_size=8)(x)
+    y = Flatten()(x)
+    outputs = Dense(num_classes,
+                    activation='softmax',
+                    kernel_initializer='he_normal')(y)
+
+    # Instantiate model.
+    model = Model(inputs=inputs, outputs=outputs)
+    return model
+
+
+def resnet_v2(input_shape, depth, num_classes=10):
+    """ResNet Version 2 Model builder [b]
+
+    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D, also known as the
+    bottleneck layer.
+    First shortcut connection per layer is 1 x 1 Conv2D.
+    Second and onwards shortcut connection is identity.
+    At the beginning of each stage, the feature map size is halved (downsampled)
+    by a convolutional layer with strides=2, while the number of filter maps is
+    doubled. Within each stage, the layers have the same number of filters and
+    the same filter map sizes.
+    Feature map sizes:
+    conv1 : 32x32, 16
+    stage 0: 32x32, 64
+    stage 1: 16x16, 128
+    stage 2: 8x8, 256
+
+    # Arguments
+        input_shape (tensor): shape of input image tensor
+        depth (int): number of core convolutional layers
+        num_classes (int): number of classes (CIFAR10 has 10)
+
+    # Returns
+        model (Model): Keras model instance
+    """
+    if (depth - 2) % 9 != 0:
+        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
+    # Start model definition.
+    num_filters_in = 16
+    num_res_blocks = int((depth - 2) / 9)
+
+    inputs = Input(shape=input_shape)
+    # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
+    x = resnet_layer(inputs=inputs,
+                     num_filters=num_filters_in,
+                     conv_first=True)
+
+    # Instantiate the stack of residual units
+    for stage in range(3):
+        for res_block in range(num_res_blocks):
+            activation = 'relu'
+            batch_normalization = True
+            strides = 1
+            if stage == 0:
+                num_filters_out = num_filters_in * 4
+                if res_block == 0:  # first layer and first stage
+                    activation = None
+                    batch_normalization = False
+            else:
+                num_filters_out = num_filters_in * 2
+                if res_block == 0:  # first layer but not first stage
+                    strides = 2  # downsample
+
+            # bottleneck residual unit
+            y = resnet_layer(inputs=x,
+                             num_filters=num_filters_in,
+                             kernel_size=1,
+                             strides=strides,
+                             activation=activation,
+                             batch_normalization=batch_normalization,
+                             conv_first=False)
+            y = resnet_layer(inputs=y,
+                             num_filters=num_filters_in,
+                             conv_first=False)
+            y = resnet_layer(inputs=y,
+                             num_filters=num_filters_out,
+                             kernel_size=1,
+                             conv_first=False)
+            if res_block == 0:
+                # linear projection residual shortcut connection to match
+                # changed dims
+                x = resnet_layer(inputs=x,
+                                 num_filters=num_filters_out,
+                                 kernel_size=1,
+                                 strides=strides,
+                                 activation=None,
+                                 batch_normalization=False)
+            x = keras.layers.add([x, y])
+
+            num_filters_in = num_filters_out
+
+    # Add classifier on top.
+    # v2 has BN-ReLU before Pooling
+    x = BatchNormalization()(x)
+    x = Activation('relu')(x)
+    x = AveragePooling2D(pool_size=8)(x)
+    y = Flatten()(x)
+    outputs = Dense(num_classes,
+                    activation='softmax',
+                    kernel_initializer='he_normal')(y)
+
+    # Instantiate model.
+    model = Model(inputs=inputs, outputs=outputs)
+    return model
+
+
+if version == 2:
+    model = resnet_v2(input_shape=input_shape, depth=depth)
+else:
+    model = resnet_v1(input_shape=input_shape, depth=depth)
+
+model.compile(loss='categorical_crossentropy',
+              optimizer=Adam(lr=lr_schedule(0)),
+              metrics=['accuracy'])
+model.summary()
+print(model_type)
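+
+# Illustrative note (not part of the original sample): with the 1e-3 base
+# rate, the step schedule above evaluates to 1e-3 up to epoch 80, then to
+# 1e-4, 1e-5, 1e-6, and finally 5e-7 past epoch 180, e.g.:
+#
+#     for e in (0, 81, 121, 161, 181):
+#         lr_schedule(e)   # prints 1e-3, 1e-4, 1e-5, 1e-6, 5e-7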
+
+# Prepare model saving directory.
+save_dir = os.path.join(os.getcwd(), 'saved_models')
+model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
+if not os.path.isdir(save_dir):
+    os.makedirs(save_dir)
+filepath = os.path.join(save_dir, model_name)
+
+# Prepare callbacks for model saving and for learning rate adjustment.
+# Note: with TF2-style metrics=['accuracy'], the validation metric is logged
+# as 'val_accuracy'; monitoring 'val_acc' would never save a checkpoint.
+checkpoint = ModelCheckpoint(filepath=filepath,
+                             monitor='val_accuracy',
+                             verbose=1,
+                             save_best_only=True)
+
+lr_scheduler = LearningRateScheduler(lr_schedule)
+
+lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
+                               cooldown=0,
+                               patience=5,
+                               min_lr=0.5e-6)
+
+callbacks = [checkpoint, lr_reducer, lr_scheduler]
+
+# Run training, with or without data augmentation.
+if not data_augmentation:
+    print('Not using data augmentation.')
+    model.fit(x_train, y_train,
+              batch_size=batch_size,
+              epochs=epochs,
+              validation_data=(x_test, y_test),
+              shuffle=True,
+              callbacks=callbacks)
+else:
+    print('Using real-time data augmentation.')
+    # This will do preprocessing and realtime data augmentation:
+    datagen = ImageDataGenerator(
+        # set input mean to 0 over the dataset
+        featurewise_center=False,
+        # set each sample mean to 0
+        samplewise_center=False,
+        # divide inputs by std of dataset
+        featurewise_std_normalization=False,
+        # divide each input by its std
+        samplewise_std_normalization=False,
+        # apply ZCA whitening
+        zca_whitening=False,
+        # epsilon for ZCA whitening
+        zca_epsilon=1e-06,
+        # randomly rotate images in the range (deg 0 to 180)
+        rotation_range=0,
+        # randomly shift images horizontally
+        width_shift_range=0.1,
+        # randomly shift images vertically
+        height_shift_range=0.1,
+        # set range for random shear
+        shear_range=0.,
+        # set range for random zoom
+        zoom_range=0.,
+        # set range for random channel shifts
+        channel_shift_range=0.,
+        # set mode for filling points outside the input boundaries
+        fill_mode='nearest',
+        # value used for fill_mode = "constant"
+        cval=0.,
+        # randomly flip images
+        horizontal_flip=True,
+        # randomly flip images
+        vertical_flip=False,
+        # set rescaling factor (applied before any other transformation)
+        rescale=None,
+        # set function that will be applied on each input
+        preprocessing_function=None,
+        # image data format, either "channels_first" or "channels_last"
+        data_format=None,
+        # fraction of images reserved for validation (strictly between 0 and 1)
+        validation_split=0.0)
+
+    # Compute quantities required for featurewise normalization
+    # (std, mean, and principal components if ZCA whitening is applied).
+    datagen.fit(x_train)
+
+    # Fit the model on the batches generated by datagen.flow().
+    # (model.fit accepts generators in TF2; fit_generator is deprecated.)
+    model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
+              validation_data=(x_test, y_test),
+              epochs=epochs, verbose=1, workers=1,
+              callbacks=callbacks)
+
+# Score trained model.
+scores = model.evaluate(x_test, y_test, verbose=1)
+print('Test loss:', scores[0])
+print('Test accuracy:', scores[1])
diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/__init__.py b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/__init__.py
new file mode 100644
index 000000000..9772d6bd7
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/__init__.py
@@ -0,0 +1,29 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/resnet.py b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/resnet.py new file mode 100644 index 000000000..b79d1703e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/model/resnet.py @@ -0,0 +1,363 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import print_function +import tensorflow.keras as keras +from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation +from tensorflow.keras.layers import AveragePooling2D, Input, Flatten +from tensorflow.keras.layers import UpSampling2D, Add, Lambda +from tensorflow.keras.regularizers import l2 +from tensorflow.keras.models import Model + +def oct_conv(inputs, + num_filters, + kernel_size, + strides, + alpha=0.5, + padding='same', + kernel_initializer='he_normal', + kernel_regularizer=l2(1e-4)): + ''' oct-conv + # Arguments + inputs: tensor or list of tensor, the number of input tensor + is one or two. 
if one, it is input_h; if two, it is [input_h, input_l]
+        alpha: num_filters * alpha is the number of out_l's filters,
+            num_filters * (1-alpha) is the number of out_h's filters
+    # return
+        out_h or [out_h, out_l]: returns out_h alone when alpha is 0,
+            otherwise [out_h, out_l]
+    '''
+
+    if not isinstance(inputs, list):
+        inputs = [inputs]
+
+    if alpha > 0:
+        if alpha >= 1:
+            raise ValueError("alpha must be in [0, 1); got %s" % alpha)
+        num_filters = [int(num_filters * (1 - alpha)), int(num_filters * alpha)]
+    else:
+        num_filters = [int(num_filters)]
+
+    outputs = []
+    for i, nfilter in enumerate(num_filters):
+        out_ = []
+        for j, x in enumerate(inputs):
+            if i == 0 and j == 1:
+                # low-frequency input feeding the high-frequency output:
+                # upsample back to the full-resolution grid first
+                x = UpSampling2D(size=(2, 2), interpolation='nearest')(x)
+            elif i == 1 and j == 0:
+                # high-frequency input feeding the low-frequency output:
+                # downsample to the half-resolution grid first
+                x = AveragePooling2D(pool_size=2)(x)
+            conv = Conv2D(nfilter,
+                          kernel_size=kernel_size,
+                          strides=strides,
+                          padding=padding,
+                          kernel_initializer=kernel_initializer,
+                          kernel_regularizer=kernel_regularizer)
+            x = conv(x)
+            out_.append(x)
+        if len(out_) == 2:
+            y = Add()(out_)
+        else:
+            y = out_[0]
+        outputs.append(y)
+
+    if len(outputs) == 2:
+        return outputs
+    else:  # just one output
+        return outputs[0]
+
+
+def oct_resnet_layer(inputs,
+                     num_filters=16,
+                     kernel_size=3,
+                     strides=1,
+                     alpha=0.5,
+                     activation='relu',
+                     batch_normalization=True,
+                     conv_first=True):
+    """2D Convolution-Batch Normalization-Activation stack builder
+
+    # Arguments
+        inputs (tensor): tensor or list of input tensors coming from the
+            input image or a previous layer; the first oct-conv layer of
+            the network has a single input.
+            The sequence is: high-frequency, low-frequency
+        num_filters (int): Conv2D number of filters
+        kernel_size (int): Conv2D square kernel dimensions
+        strides (int): Conv2D square stride dimensions
+        alpha (float): the ratio of low-frequency output features;
+            note that if alpha=0 the output is a single tensor
+        activation (string): activation name
+        batch_normalization (bool): whether to include batch normalization
+        conv_first (bool): conv-bn-activation (True) or
+            bn-activation-conv (False)
+
+    # Returns
+        x (list of tensor): tensor as input to the next layer;
+            if alpha > 0, x contains out_h and out_l
+    """
+    if conv_first:
+        if alpha > 0:
+            out_h, out_l = oct_conv(inputs,
+                                    num_filters=num_filters,
+                                    kernel_size=kernel_size,
+                                    strides=strides,
+                                    alpha=alpha)
+            if batch_normalization:
+                out_h = BatchNormalization()(out_h)
+                out_l = BatchNormalization()(out_l)
+            if activation is not None:
+                out_h = Activation(activation)(out_h)
+                out_l = Activation(activation)(out_l)
+            return [out_h, out_l]
+        else:
+            out_h = oct_conv(inputs,
+                             num_filters=num_filters,
+                             kernel_size=kernel_size,
+                             strides=strides,
+                             alpha=alpha)
+            if batch_normalization:
+                out_h = BatchNormalization()(out_h)
+            if activation is not None:
+                out_h = Activation(activation)(out_h)
+            return out_h
+    else:
+        inputs_ = []
+        inputs = inputs if isinstance(inputs, (list, tuple)) else [inputs]
+        for x in inputs:
+            if batch_normalization:
+                x = BatchNormalization()(x)
+            if activation is not None:
+                x = Activation(activation)(x)
+            inputs_.append(x)
+
+        output = oct_conv(inputs_,
+                          num_filters=num_filters,
+                          kernel_size=kernel_size,
+                          strides=strides,
+                          alpha=alpha)
+        return output
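+
+# Illustrative only (not part of the original sample): a quick shape check
+# for oct_resnet_layer above, assuming a CIFAR-10-sized 32x32x3 input. With
+# the default alpha=0.5 the 16 filters are split 8/8 between the two paths,
+# and the low-frequency path lives on a spatially halved grid:
+#
+#     inp = Input(shape=(32, 32, 3))
+#     out_h, out_l = oct_resnet_layer(inputs=inp, num_filters=16)
+#     out_h.shape  # (None, 32, 32, 8)  high-frequency path
+#     out_l.shape  # (None, 16, 16, 8)  low-frequency path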
+
+
+def resnet_v1(input_shape, depth, num_classes=10):
+    """ResNet Version 1 Model builder [a]
+
+    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
+    Last ReLU is after the shortcut connection.
+    At the beginning of each stage, the feature map size is halved (downsampled)
+    by a convolutional layer with strides=2, while the number of filters is
+    doubled. Within each stage, the layers have the same number of filters and
+    the same feature map sizes.
+    Feature map sizes:
+    stage 0: 32x32, 16
+    stage 1: 16x16, 32
+    stage 2: 8x8, 64
+    The number of parameters is approximately the same as Table 6 of [a]:
+    ResNet20 0.27M
+    ResNet32 0.46M
+    ResNet44 0.66M
+    ResNet56 0.85M
+    ResNet110 1.7M
+
+    # Arguments
+        input_shape (tensor): shape of input image tensor
+        depth (int): number of core convolutional layers
+        num_classes (int): number of classes (CIFAR10 has 10)
+
+    # Returns
+        model (Model): Keras model instance
+    """
+    if (depth - 2) % 6 != 0:
+        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
+    # Start model definition.
+    num_filters = 16
+    num_res_blocks = int((depth - 2) / 6)
+
+    inputs = Input(shape=input_shape)
+    x = oct_resnet_layer(inputs=inputs)  # x is a list of tensors, len(x) == 2
+    # Instantiate the stack of residual units
+    alpha = 0.5
+    for stack in range(3):
+        if stack == 2:
+            # last stack: collapse back to a single full-resolution path
+            alpha = 0
+        for res_block in range(num_res_blocks):
+            strides = 1
+            if stack > 0 and res_block == 0:  # first layer but not first stack
+                strides = 2  # downsample
+            y = oct_resnet_layer(inputs=x,
+                                 num_filters=num_filters,
+                                 strides=strides,
+                                 alpha=alpha)
+            y = oct_resnet_layer(inputs=y,
+                                 num_filters=num_filters,
+                                 activation=None,
+                                 alpha=alpha)
+            if stack > 0 and res_block == 0:  # first layer but not first stack
+                # linear projection residual shortcut connection to match
+                # changed dims, because the feature map size has changed
+                x = oct_resnet_layer(inputs=x,
+                                     num_filters=num_filters,
+                                     kernel_size=1,
+                                     strides=strides,
+                                     activation=None,
+                                     batch_normalization=False,
+                                     alpha=alpha)
+            if alpha == 0:
+                x = Add()([x, y])
+                x = Activation('relu')(x)
+            else:
+                xh, xl = x
+                yh, yl = y
+                xh = Add()([xh, yh])
+                xl = Add()([xl, yl])
+                xh = Activation('relu')(xh)
+                xl = Activation('relu')(xl)
+                x = [xh, xl]
+        num_filters *= 2
+
+    # Add classifier on top.
+    # v1 does not use BN after last shortcut connection-ReLU
+    x = AveragePooling2D(pool_size=8)(x)
+    y = Flatten()(x)
+    outputs = Dense(num_classes,
+                    activation='softmax',
+                    kernel_initializer='he_normal')(y)
+
+    # Instantiate model.
+    model = Model(inputs=inputs, outputs=outputs)
+    return model
+
+
+def resnet_v2(input_shape, depth, num_classes=10):
+    """ResNet Version 2 Model builder [b]
+
+    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D, also known as the
+    bottleneck layer.
+    First shortcut connection per layer is 1 x 1 Conv2D.
+    Second and onwards shortcut connection is identity.
+    At the beginning of each stage, the feature map size is halved (downsampled)
+    by a convolutional layer with strides=2, while the number of filter maps is
+    doubled. Within each stage, the layers have the same number of filters and
+    the same filter map sizes.
+    Feature map sizes:
+    conv1 : 32x32, 16
+    stage 0: 32x32, 64
+    stage 1: 16x16, 128
+    stage 2: 8x8, 256
+
+    # Arguments
+        input_shape (tensor): shape of input image tensor
+        depth (int): number of core convolutional layers
+        num_classes (int): number of classes (CIFAR10 has 10)
+
+    # Returns
+        model (Model): Keras model instance
+    """
+    if (depth - 2) % 9 != 0:
+        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
+    # Start model definition.
+ num_filters_in = 16 + num_res_blocks = int((depth - 2) / 9) + + inputs = Input(shape=input_shape) + # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths + x = oct_resnet_layer(inputs=inputs, + num_filters=num_filters_in, + conv_first=True) + + # Instantiate the stack of residual units + alpha = 0.5 + for stage in range(3): + for res_block in range(num_res_blocks): + activation = 'relu' + batch_normalization = True + strides = 1 + if stage == 0: + num_filters_out = num_filters_in * 4 + if res_block == 0: # first layer and first stage + activation = None + batch_normalization = False + else: + num_filters_out = num_filters_in * 2 + if res_block == 0: # first layer but not first stage + strides = 2 # downsample + if stage == 2: # and res_block == (num_res_blocks - 1): + alpha = 0 + + # bottleneck residual unit + y = oct_resnet_layer(inputs=x, + num_filters=num_filters_in, + kernel_size=1, + strides=strides, + activation=activation, + batch_normalization=batch_normalization, + conv_first=False, + alpha=alpha) + y = oct_resnet_layer(inputs=y, + num_filters=num_filters_in, + conv_first=False, + alpha=alpha) + y = oct_resnet_layer(inputs=y, + num_filters=num_filters_out, + kernel_size=1, + conv_first=False, + alpha=alpha) + if res_block == 0: + # linear projection residual shortcut connection to match + # changed dims + x = oct_resnet_layer(inputs=x, + num_filters=num_filters_out, + kernel_size=1, + strides=strides, + activation=None, + batch_normalization=False, + alpha=alpha) + if alpha == 0: + x = keras.layers.add([x, y]) + else: + xh = keras.layers.add([x[0], y[0]]) + xl = keras.layers.add([x[1], y[1]]) + x = [xh, xl] + + num_filters_in = num_filters_out + + # Add classifier on top. + # v2 has BN-ReLU before Pooling + x = BatchNormalization()(x) + x = Activation('relu')(x) + x = AveragePooling2D(pool_size=8)(x) + y = Flatten()(x) + outputs = Dense(num_classes, + activation='softmax', + kernel_initializer='he_normal')(y) + + # Instantiate model. + model = Model(inputs=inputs, outputs=outputs) + return model + + diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..801f37760 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:POK +PercisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/score.py b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/score.py new file mode 100644 index 000000000..8a85538c8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/score.py @@ -0,0 +1,76 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from tensorflow import keras +from tensorflow.keras.models import load_model +from tensorflow.keras.datasets import cifar10 +import numpy as np +import sys + +def score(modelfile): + # Load the CIFAR10 data. + (x_train, y_train), (x_test, y_test) = cifar10.load_data() + num_classes = 10 + + # Input image dimensions. + input_shape = x_train.shape[1:] + + # Normalize data. + x_train = x_train.astype('float32') / 255 + x_test = x_test.astype('float32') / 255 + + subtract_pixel_mean = True + # If subtract pixel mean is enabled + if subtract_pixel_mean: + x_train_mean = np.mean(x_train, axis=0) + x_train -= x_train_mean + x_test -= x_train_mean + + print('x_train shape:', x_train.shape) + print(x_train.shape[0], 'train samples') + print(x_test.shape[0], 'test samples') + print('y_train shape:', y_train.shape) + + # Convert class vectors to binary class matrices. + y_train = keras.utils.to_categorical(y_train, num_classes) + y_test = keras.utils.to_categorical(y_test, num_classes) + + model = load_model(modelfile) + # preds = model.predict(x) + # print(preds) + + # Score trained model. 
+    scores = model.evaluate(x_test, y_test, verbose=1)
+    print('Test loss:', scores[0])
+    print('Test accuracy:', scores[1])
+
+if __name__ == '__main__':
+    modelfile = sys.argv[1]
+    print("score model: ", modelfile)
+    score(modelfile)
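+
+# Illustrative only (not part of the original sample): the expected call,
+# with a hypothetical checkpoint name following the
+# 'cifar10_%s_model.{epoch:03d}.h5' pattern written by the training script:
+#
+#     from score import score
+#     score('saved_models/cifar10_ResNet20v1_model.195.h5')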
${para#*=}` + elif [[ $para == --train_epochs* ]];then + train_epochs=`echo ${para#*=}` + elif [[ $para == --block_num* ]];then + block_num=`echo ${para#*=}` + elif [[ $para == --model_version* ]];then + model_version=`echo ${para#*=}` + fi +done + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path +#设置环境变量,不需要修改 +echo "Device ID: $ASCEND_DEVICE_ID" + +#创建DeviceID输出目录,不需要修改 +if [ -d ${cur_path}/test/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/test/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt +else + mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt +fi + +#############执行训练######################### +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#执行训练脚本,以下传参不需要修改,其他需要模型审视修改 +#--data_path, --model_dir, --precision_mode, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path,--autotune +nohup python3 train.py \ + --data_dir=${data_path} \ + --batch_size=${batch_size} \ + --train_epochs=${train_epochs} \ + --block_num=${block_num} \ + --model_version=${model_version} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 + +wait + +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`cat ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r' |grep -Eo "[0-9]*ms/step"| awk 'END {print $1}' | cut -d m -f 1` +TrainingTime=`awk 'BEGIN{printf "%.3f\n",'${TrainingTime}'/'1000'}'` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep "ms/step" ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END {print $(NF-3)}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#性能看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +#cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tr -d '\b\r' | grep -Eo "loss: [0-9]*\.[0-9]*" | awk -F " " '{print $2}' > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt +grep "ms/step" ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk '{print $14}' > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk '{print}' ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${CaseName}_loss.txt| tail -n 1` + +# +Make_Time=`grep -a 'TOTLE_TIME' ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_$ASCEND_DEVICE_ID.log|awk 'END {print $3}'` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > 
${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "CaseName = ${CaseName}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log +#echo "Make_Time = ${Make_Time}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..c9fdc5ae1 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,213 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd`/../ + +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=0 + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#设置默认日志级别,不需要修改 +#export ASCEND_GLOBAL_LOG_LEVEL=3 + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="Oct-ResNet_ID2890_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练step +#train_steps=1000 +#训练batch_size +batch_size=32 +#num for res blocks in each stack +block_num=3 +# Model version +# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2) +model_version=1 + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1P.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + --max_step # of step for training + --learning_rate learning rate + --batch batch size + --modeldir model dir + --save_interval save interval for ckpt + --loss_scale enable loss scale ,default is False + -h/--help show help message + " + exit 1 +fi + +#参数校验,不需要修改 +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + 
data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --batch* ]];then + batch_size=`echo ${para#*=}` + elif [[ $para == --train_epochs* ]];then + train_epochs=`echo ${para#*=}` + elif [[ $para == --block_num* ]];then + block_num=`echo ${para#*=}` + elif [[ $para == --model_version* ]];then + model_version=`echo ${para#*=}` + fi +done + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path +#设置环境变量,不需要修改 +echo "Device ID: $ASCEND_DEVICE_ID" + +#创建DeviceID输出目录,不需要修改 +if [ -d ${cur_path}/test/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/test/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt +else + mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt +fi + +#############执行训练######################### +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#执行训练脚本,以下传参不需要修改,其他需要模型审视修改 +#--data_path, --model_dir, --precision_mode, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path,--autotune +nohup python3 train.py \ + --data_dir=${data_path} \ + --batch_size=${batch_size} \ + --train_epochs=${train_epochs} \ + --block_num=${block_num} \ + --model_version=${model_version} \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 + +wait + +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`cat ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | tr -d '\b\r' |grep -Eo "[0-9]*ms/step"| awk 'END {print $1}' | cut -d m -f 1` +TrainingTime=`awk 'BEGIN{printf "%.3f\n",'${TrainingTime}'/'1000'}'` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${TrainingTime}'}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep "ms/step" ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END {print $(NF-3)}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#性能看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` 
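+# CaseName names the summary log written below and encodes the run
+# configuration: <network>_bs<batch_size>_<rank_size>p_perf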
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+
+# Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+#cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | tr -d '\b\r' | grep -Eo "loss: [0-9]*\.[0-9]*" | awk -F " " '{print $2}' > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt
+grep "ms/step" ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk '{print $14}' > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${CaseName}_loss.txt
+# Loss value of the last iteration; no modification needed
+ActualLoss=`awk '{print}' ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${CaseName}_loss.txt| tail -n 1`
+
+#
+Make_Time=`grep -a 'TOTLE_TIME' ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_$ASCEND_DEVICE_ID.log|awk 'END {print $3}'`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "CaseName = ${CaseName}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
+#echo "Make_Time = ${Make_Time}" >> ${cur_path}/test/output/${ASCEND_DEVICE_ID}/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..0c9c88371
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/Oct-ResNet_ID2890_for_TensorFlow2.X/train.py
@@ -0,0 +1,358 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import shutil
+import argparse
+import ast
+
+from tensorflow import keras
+from tensorflow.keras.optimizers import Adam
+from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
+from tensorflow.keras.callbacks import ReduceLROnPlateau
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+# from tensorflow.keras import backend as K
+# from tensorflow.keras.datasets import cifar10
+from keras.datasets.cifar import load_batch
+from tensorflow.python.keras import backend
+
+import numpy as np
+import os
+import datetime
+
+import npu_device
+from npu_device.compat.v1.npu_init import *
+npu_device.compat.enable_v1()
+
+from model.resnet import resnet_v1, resnet_v2
+
+import tensorflow.compat.v1 as tf
+tf.disable_v2_behavior()
+
+# import tensorflow.python.keras as keras
+# from tensorflow.python.keras import backend as K
+from keras import backend as K
+
+
+parser = argparse.ArgumentParser(
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('--data_dir', default="../", help="""directory to data""")
+parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""")
+parser.add_argument('--train_epochs', default=1, type=int, help="""epochs""")
+parser.add_argument("--block_num", default=3, type=int, help="number of res blocks in each stack.")
+parser.add_argument("--model_version", default=1, type=int, help="Model version. Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2).")
+
+#===============================NPU Migration=========================================
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str, help='precision mode (allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, help='whether or not to enable overflow detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10", help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, help='whether or not to enable profiling for performance debugging, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, help='auto tune flag, default is False')
+#===============================NPU Migration=========================================
+
+args = parser.parse_args()
+
+sess_config = tf.ConfigProto()
+custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
+custom_op.name = "NpuOptimizer"
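+# "dynamic_input" opts this session in to dynamic input shapes on the NPU, and
+# "lazy_recompile" defers graph recompilation until a previously unseen input
+# shape arrives (descriptive comment; behavior as commonly described for these
+# npu_device options, not verified against a specific CANN release).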
+custom_op.parameter_map["dynamic_input"].b = True
+custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
+#===============================NPU Migration=========================================
+if args.data_dump_flag:
+    custom_op.parameter_map["enable_dump"].b = True
+    custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(args.data_dump_path)
+    custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(args.data_dump_step)
+    custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
+if args.over_dump:
+    custom_op.parameter_map["enable_dump_debug"].b = True
+    custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(args.over_dump_path)
+    custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
+if args.profiling:
+    # Profiling is toggled with the boolean "profiling_mode" switch; the string
+    # "precision_mode" option is set separately below.
+    custom_op.parameter_map["profiling_mode"].b = True
+    profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                        "training_trace":"on", \
+                        "task_trace":"on", \
+                        "aicpu":"on", \
+                        "aic_metrics":"PipeUtilization",\
+                        "fp_point":"", \
+                        "bp_point":""}'
+    custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
+custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(args.precision_mode)
+if args.use_mixlist and args.precision_mode=='allow_mix_precision':
+    custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(args.mixlist_file)
+if args.fusion_off_flag:
+    custom_op.parameter_map["fusion_switch_file"].s = tf.compat.as_bytes(args.fusion_off_file)
+if args.auto_tune:
+    custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
+#===============================NPU Migration=========================================
+sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+sess_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+sess = tf.Session(config=sess_config)
+K.set_session(sess)
+
+
+def lr_schedule(epoch):
+    """Learning Rate Schedule
+
+    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
+    Called automatically every epoch as part of callbacks during training.
+
+    # Arguments
+        epoch (int): The number of epochs
+
+    # Returns
+        lr (float32): learning rate
+    """
+    lr = 1e-3
+    if epoch > 180:
+        lr *= 0.5e-3
+    elif epoch > 160:
+        lr *= 1e-3
+    elif epoch > 120:
+        lr *= 1e-2
+    elif epoch > 80:
+        lr *= 1e-1
+    print('Learning rate: ', lr)
+    return lr
+
+def load_data(data_path):
+    dirname = 'cifar-10-batches-py'
+    origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
+    path = os.path.join(data_path, dirname)
+    num_train_samples = 50000
+    x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
+    y_train = np.empty((num_train_samples,), dtype='uint8')
+    for i in range(1, 6):
+        fpath = os.path.join(path, 'data_batch_' + str(i))
+        (x_train[(i - 1) * 10000:i * 10000, :, :, :],
+         y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)
+    fpath = os.path.join(path, 'test_batch')
+    x_test, y_test = load_batch(fpath)
+    y_train = np.reshape(y_train, (len(y_train), 1))
+    y_test = np.reshape(y_test, (len(y_test), 1))
+    if backend.image_data_format() == 'channels_last':
+        x_train = x_train.transpose(0, 2, 3, 1)
+        x_test = x_test.transpose(0, 2, 3, 1)
+    x_test = x_test.astype(x_train.dtype)
+    y_test = y_test.astype(y_train.dtype)
+    return (x_train, y_train), (x_test, y_test)
+
+if __name__ == "__main__":
+    import sys
+    # Training parameters
+    batch_size = args.batch_size  # orig paper trained all networks with batch_size=128
+    epochs = args.train_epochs
+    # epochs = 10
+    data_augmentation = True
+    num_classes = 10
+
+    # Subtracting pixel mean improves accuracy
+    subtract_pixel_mean = True
+
+    # num of res blocks in each stack
+    n = args.block_num
+
+    # Model version
+    # Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
+    version = args.model_version
+
+    # Computed depth from supplied model parameter n
+    if version == 1:
+        depth = n * 6 + 2
+    elif version == 2:
+        depth = n * 9 + 2
+
+    # Model name, depth and version
+    model_type = 'ResNet%dv%d' % (depth, version)
+
+    starttime = datetime.datetime.now()
+
+    # Load the CIFAR10 data.
+    (x_train, y_train), (x_test, y_test) = load_data(args.data_dir)
+
+    # Input image dimensions.
+    input_shape = x_train.shape[1:]
+
+    # Normalize data.
+    x_train = x_train.astype('float32') / 255
+    x_test = x_test.astype('float32') / 255
+
+    # If subtract pixel mean is enabled
+    if subtract_pixel_mean:
+        x_train_mean = np.mean(x_train, axis=0)
+        x_train -= x_train_mean
+        x_test -= x_train_mean
+
+    print('x_train shape:', x_train.shape)
+    print(x_train.shape[0], 'train samples')
+    print(x_test.shape[0], 'test samples')
+    print('y_train shape:', y_train.shape)
+
+    # Convert class vectors to binary class matrices.
+    y_train = keras.utils.to_categorical(y_train, num_classes)
+    y_test = keras.utils.to_categorical(y_test, num_classes)
+
+    if version == 2:
+        model = resnet_v2(input_shape=input_shape, depth=depth)
+    else:
+        model = resnet_v1(input_shape=input_shape, depth=depth)
+
+    # from tensorflow.keras.utils import plot_model
+    # plot_model(model, to_file=model_type+'.pdf')
+    # print("write model graph done!")
+    # exit()
+    model.compile(loss='categorical_crossentropy',
+                  optimizer=Adam(lr=lr_schedule(0)),
+                  metrics=['accuracy'])
+    model.summary()
+    print(model_type)
+
+    # Prepare model saving directory.
+ save_dir = os.path.join(os.getcwd(), 'saved_models') + model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type + if not os.path.isdir(save_dir): + os.makedirs(save_dir) + filepath = os.path.join(save_dir, model_name) + + # Prepare callbacks for model saving and for learning rate adjustment. + checkpoint = ModelCheckpoint(filepath=filepath, + monitor='val_acc', + verbose=1, + save_best_only=True) + + lr_scheduler = LearningRateScheduler(lr_schedule) + + lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), + cooldown=0, + patience=5, + min_lr=0.5e-6) + + callbacks = [checkpoint, lr_reducer, lr_scheduler] + + endtime = datetime.datetime.now() + # Run training, with or without data augmentation. + if not data_augmentation: + print('Not using data augmentation.') + history = model.fit(x_train, y_train, + batch_size=batch_size, + epochs=epochs, + validation_data=(x_test, y_test), + shuffle=True, + callbacks=callbacks) + else: + print('Using real-time data augmentation.') + # This will do preprocessing and realtime data augmentation: + datagen = ImageDataGenerator( + # set input mean to 0 over the dataset + featurewise_center=False, + # set each sample mean to 0 + samplewise_center=False, + # divide inputs by std of dataset + featurewise_std_normalization=False, + # divide each input by its std + samplewise_std_normalization=False, + # apply ZCA whitening + zca_whitening=False, + # epsilon for ZCA whitening + zca_epsilon=1e-06, + # randomly rotate images in the range (deg 0 to 180) + rotation_range=0, + # randomly shift images horizontally + width_shift_range=0.1, + # randomly shift images vertically + height_shift_range=0.1, + # set range for random shear + shear_range=0., + # set range for random zoom + zoom_range=0., + # set range for random channel shifts + channel_shift_range=0., + # set mode for filling points outside the input boundaries + fill_mode='nearest', + # value used for fill_mode = "constant" + cval=0., + # randomly flip images + horizontal_flip=True, + # randomly flip images + vertical_flip=False, + # set rescaling factor (applied before any other transformation) + rescale=None, + # set function that will be applied on each input + preprocessing_function=None, + # image data format, either "channels_first" or "channels_last" + data_format=None, + # fraction of images reserved for validation (strictly between 0 and 1) + validation_split=0.0) + + # Compute quantities required for featurewise normalization + # (std, mean, and principal components if ZCA whitening is applied). + datagen.fit(x_train) + + # Fit the model on the batches generated by datagen.flow(). + history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), + validation_data=(x_test, y_test), + epochs=epochs, verbose=1, workers=1, + callbacks=callbacks) + + # import matplotlib.pyplot as plt + # # draw acc curve + # plt.plot(history.history['acc']) + # plt.plot(history.history['val_acc']) + # plt.title('Model accuracy') + # plt.ylabel('Accuracy') + # plt.xlabel('Epoch') + # plt.legend(['Train', 'Test'], loc='upper left') + # plt.savefig('./acc.png') + # plt.show() + + # draw loss curve + # plt.plot(history.history['loss']) + # plt.plot(history.history['val_loss']) + # plt.title('Model loss') + # plt.ylabel('Loss') + # plt.xlabel('Epoch') + # plt.legend(['Train', 'Test'], loc='upper left') + # plt.savefig('./loss.png') + # plt.show() + + # Score trained model. 
+ scores = model.evaluate(x_test, y_test, verbose=1) + print('Test loss:', scores[0]) + print('Test accuracy:', scores[1]) + + TOTLE_TIME = (endtime - starttime).seconds + print("TOTLE_TIME : ", TOTLE_TIME) + sess.close() -- Gitee From a4fac49830fad1ea3a85dabc51bfc56d01fd80d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:20:14 +0000 Subject: [PATCH 45/54] =?UTF-8?q?pointnet=5Fsegmentation=5FID2532=5Ffor=5F?= =?UTF-8?q?TensorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 284 ++++++++ .../ReadME.md | 31 + .../pointnet_segmentation.py | 666 ++++++++++++++++++ .../requirements.txt | 0 .../test/train_full_1p.sh | 104 +++ .../test/train_performance_1p_dynamic_eval.sh | 115 +++ .../test/train_performance_1p_static_eval.sh | 104 +++ 7 files changed, 1304 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/ReadME.md create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/pointnet_segmentation.py create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh create mode 100644 TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/ReadME.md b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/ReadME.md
new file mode 100644
index 000000000..46f7d72e9
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/ReadME.md
@@ -0,0 +1,31 @@
+# 1. Basic Information
+
+Network name: `pointnet_segmentation_ID2532_for_TensorFlow2.X`
+
+github addr: https://github.com/keras-team/keras-io/tree/master/examples/vision
+
+# 2. Code Changes
+
+# 3. Running the Program
+
+```shell
+bash run_1p.sh
+```
+
+# 4. Archived File Paths
+
+1. Dataset
+Original dataset: https://github.com/soumik12345/point-cloud-segmentation/releases/download/v0.1/shapenet.zip
+Per the code, the files actually archived are:
+	PartAnnotation/02691156/
+	PartAnnotation/metadata.json
+
+2. Archived files
+
+3. Migrated code
+
+4. Source code
+
+5. Original migration code
+
+# 5. NPU Working Environment
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/pointnet_segmentation.py b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/pointnet_segmentation.py
new file mode 100644
index 000000000..a90419dc0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/pointnet_segmentation.py
@@ -0,0 +1,666 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Point cloud segmentation with PointNet
+Author: [Soumik Rakshit](https://github.com/soumik12345), [Sayak Paul](https://github.com/sayakpaul)
+Date created: 2020/10/23
+Last modified: 2020/10/24
+Description: Implementation of a PointNet-based model for segmenting point clouds.
+"""
+"""
+## Introduction
+
+A "point cloud" is an important type of data structure for storing geometric shape data.
+Due to its irregular format, it's often transformed into
+regular 3D voxel grids or collections of images before being used in deep learning applications,
+a step which makes the data unnecessarily large.
+The PointNet family of models solves this problem by directly consuming point clouds, respecting
+the permutation-invariance property of the point data. The PointNet family of
+models provides a simple, unified architecture
+for applications ranging from **object classification** and **part segmentation** to
+**scene semantic parsing**.
+
+In this example, we demonstrate the implementation of the PointNet architecture
+for shape segmentation.
+
+### References
+
+- [PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation](https://arxiv.org/abs/1612.00593)
+- [Point cloud classification with PointNet](https://keras.io/examples/vision/pointnet/)
+- [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025)
+"""
+
+"""
+## Imports
+"""
+
+import os
+import json
+import random
+import numpy as np
+import pandas as pd
+from tqdm import tqdm
+from glob import glob
+
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+
+import npu_device
+npu_device.open().as_default()
+
+import matplotlib.pyplot as plt
+
+import argparse
+import ast
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--data_path', default='./',
+                        help="""directory to data""")
+    parser.add_argument('--batch_size', default=32, type=int,
+                        help="""batch size for 1p""")
+    parser.add_argument('--epochs', default=60, type=int,
+                        help="""epochs""")
+    parser.add_argument('--drop_remainder', default="False", type=ast.literal_eval,
+                        help="""drop_remainder True or False; denotes static or dynamic input shapes""")
+    args, unknown_args = parser.parse_known_args()
+    if len(unknown_args) > 0:
+        for bad_arg in unknown_args:
+            print("ERROR: Unknown command line arg: %s" % bad_arg)
+        raise ValueError("Invalid command line arg(s)")
+    return args
+
+args = parse_args()
+
+"""
+## Downloading Dataset
+
+The [ShapeNet dataset](https://shapenet.org/) is an ongoing effort to establish a richly-annotated,
+large-scale dataset of 3D shapes. **ShapeNetCore** is a subset of the full ShapeNet
+dataset with clean single 3D models and manually verified category and alignment
+annotations. It covers 55 common object categories, with about 51,300 unique 3D models.
+
+For this example, we use one of the 12 object categories of
+[PASCAL 3D+](http://cvgl.stanford.edu/projects/pascal3d.html),
+included as part of the ShapenetCore dataset.
+"""
+
+# dataset_url = "https://github.com/soumik12345/point-cloud-segmentation/releases/download/v0.1/shapenet.zip"
+
+# dataset_path = keras.utils.get_file(
+    # fname="shapenet.zip",
+    # origin=dataset_url,
+    # cache_subdir="datasets",
+    # hash_algorithm="auto",
+    # extract=True,
+    # archive_format="auto",
+    # cache_dir="datasets",
+# )
+
+"""
+## Loading the dataset
+
+We parse the dataset metadata in order to easily map model categories to their
+respective directories and segmentation classes to colors for the purpose of
+visualization.
+"""
+
+metadata_file = os.path.join(args.data_path, "PartAnnotation/metadata.json")
+with open(metadata_file) as json_file:
+    metadata = json.load(json_file)
+
+print(metadata)
+
+"""
+In this example, we train PointNet to segment the parts of an `Airplane` model.
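+
+The relevant slice of `metadata.json` is expected to look roughly like the
+sketch below; the key names (`directory`, the dataset's misspelled `lables`,
+and `colors`) are taken from how this script indexes the file, while the
+concrete label and color values are illustrative only:
+"""
+
+# Hypothetical illustration of the metadata layout this script relies on; the
+# real values ship with the dataset archive in PartAnnotation/metadata.json
+# (the "02691156" directory is the Airplane category archived per the README).
+_metadata_example = {
+    "Airplane": {
+        "directory": "02691156",
+        "lables": ["wing", "body", "tail", "engine"],  # note the dataset's misspelled key
+        "colors": ["blue", "green", "red", "pink"],
+    }
+}
+
+"""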
+"""
+
+points_dir = "{}/PartAnnotation/{}/points".format(args.data_path,
+    metadata["Airplane"]["directory"]
+)
+labels_dir = "{}/PartAnnotation/{}/points_label".format(args.data_path,
+    metadata["Airplane"]["directory"]
+)
+LABELS = metadata["Airplane"]["lables"]
+COLORS = metadata["Airplane"]["colors"]
+
+VAL_SPLIT = 0.2
+NUM_SAMPLE_POINTS = 1024
+BATCH_SIZE = args.batch_size
+EPOCHS = args.epochs
+INITIAL_LR = 1e-3
+
+"""
+## Structuring the dataset
+
+We generate the following in-memory data structures from the Airplane point clouds and
+their labels:
+
+- `point_clouds` is a list of `np.array` objects that represent the point cloud data in
+the form of x, y and z coordinates. Axis 0 represents the number of points in the
+point cloud, while axis 1 represents the coordinates. `all_labels` is the list
+that represents the label of each coordinate as a string (needed mainly for
+visualization purposes).
+- `test_point_clouds` is in the same format as `point_clouds`, but doesn't have
+the corresponding labels for the point clouds.
+- `all_labels` is a list of `np.array` objects that represent the point cloud labels
+for each coordinate, corresponding to the `point_clouds` list.
+- `point_cloud_labels` is a list of `np.array` objects that represent the point cloud
+labels for each coordinate in one-hot encoded form, corresponding to the `point_clouds`
+list.
+"""
+
+point_clouds, test_point_clouds = [], []
+point_cloud_labels, all_labels = [], []
+
+points_files = glob(os.path.join(points_dir, "*.pts"))
+for point_file in tqdm(points_files):
+    point_cloud = np.loadtxt(point_file)
+    if point_cloud.shape[0] < NUM_SAMPLE_POINTS:
+        continue
+
+    # Get the file-id of the current point cloud for parsing its
+    # labels.
+    file_id = point_file.split("/")[-1].split(".")[0]
+    label_data, num_labels = {}, 0
+    for label in LABELS:
+        label_file = os.path.join(labels_dir, label, file_id + ".seg")
+        if os.path.exists(label_file):
+            label_data[label] = np.loadtxt(label_file).astype("float32")
+            num_labels = len(label_data[label])
+
+    # Point clouds having labels will be our training samples.
+    try:
+        label_map = ["none"] * num_labels
+        for label in LABELS:
+            for i, data in enumerate(label_data[label]):
+                label_map[i] = label if data == 1 else label_map[i]
+        label_data = [
+            LABELS.index(label) if label != "none" else len(LABELS)
+            for label in label_map
+        ]
+        # Apply one-hot encoding to the dense label representation.
+        label_data = keras.utils.to_categorical(label_data, num_classes=len(LABELS) + 1)
+
+        point_clouds.append(point_cloud)
+        point_cloud_labels.append(label_data)
+        all_labels.append(label_map)
+    except KeyError:
+        test_point_clouds.append(point_cloud)
+
+"""
+Next, we take a look at some samples from the in-memory arrays we just generated:
+"""
+
+# for _ in range(5):
+    # i = random.randint(0, len(point_clouds) - 1)
+    # print(f"point_clouds[{i}].shape:", point_clouds[i].shape)
+    # print(f"point_cloud_labels[{i}].shape:", point_cloud_labels[i].shape)
+    # for j in range(5):
+        # print(
+            # f"all_labels[{i}][{j}]:",
+            # all_labels[i][j],
+            # f"\tpoint_cloud_labels[{i}][{j}]:",
+            # point_cloud_labels[i][j],
+            # "\n",
+        # )
+
+"""
+Now, let's visualize some of the point clouds along with their labels.
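+
+Before plotting, here is a quick hedged sketch (not part of the original
+tutorial code) of the label encoding used above: dense integer labels are
+expanded to one-hot rows, with the extra class index `len(LABELS)` reserved
+for points that carry no part label.
+"""
+
+# Illustrative only: three dense labels over 4 part classes plus one "none"
+# class become one-hot rows of length 5.
+_demo_dense = [0, 2, 4]
+_demo_one_hot = keras.utils.to_categorical(_demo_dense, num_classes=5)
+# _demo_one_hot[0] -> [1., 0., 0., 0., 0.]; _demo_one_hot[2] -> [0., 0., 0., 0., 1.]
+
+"""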
+""" + + +# def visualize_data(point_cloud, labels): + # df = pd.DataFrame( + # data={ + # "x": point_cloud[:, 0], + # "y": point_cloud[:, 1], + # "z": point_cloud[:, 2], + # "label": labels, + # } + # ) + # fig = plt.figure(figsize=(15, 10)) + # ax = plt.axes(projection="3d") + # for index, label in enumerate(LABELS): + # c_df = df[df["label"] == label] + # try: + # ax.scatter( + # c_df["x"], c_df["y"], c_df["z"], label=label, alpha=0.5, c=COLORS[index] + # ) + # except IndexError: + # pass + # ax.legend() + # plt.show() + + +# visualize_data(point_clouds[0], all_labels[0]) +# visualize_data(point_clouds[300], all_labels[300]) + + +""" +### Preprocessing + +Note that all the point clouds that we have loaded consist of a variable number of points, +which makes it difficult for us to batch them together. In order to overcome this problem, we +randomly sample a fixed number of points from each point cloud. We also normalize the +point clouds in order to make the data scale-invariant. +""" + +for index in tqdm(range(len(point_clouds))): + current_point_cloud = point_clouds[index] + current_label_cloud = point_cloud_labels[index] + current_labels = all_labels[index] + num_points = len(current_point_cloud) + # Randomly sampling respective indices. + sampled_indices = random.sample(list(range(num_points)), NUM_SAMPLE_POINTS) + # Sampling points corresponding to sampled indices. + sampled_point_cloud = np.array([current_point_cloud[i] for i in sampled_indices]) + # Sampling corresponding one-hot encoded labels. + sampled_label_cloud = np.array([current_label_cloud[i] for i in sampled_indices]) + # Sampling corresponding labels for visualization. + sampled_labels = np.array([current_labels[i] for i in sampled_indices]) + # Normalizing sampled point cloud. + norm_point_cloud = sampled_point_cloud - np.mean(sampled_point_cloud, axis=0) + norm_point_cloud /= np.max(np.linalg.norm(norm_point_cloud, axis=1)) + point_clouds[index] = norm_point_cloud + point_cloud_labels[index] = sampled_label_cloud + all_labels[index] = sampled_labels + +""" +Let's visualize the sampled and normalized point clouds along with their corresponding +labels. +""" + +# visualize_data(point_clouds[0], all_labels[0]) +# visualize_data(point_clouds[300], all_labels[300]) + +""" +### Creating TensorFlow datasets + +We create `tf.data.Dataset` objects for the training and validation data. +We also augment the training point clouds by applying random jitter to them. 
+""" + + +def load_data(point_cloud_batch, label_cloud_batch): + point_cloud_batch.set_shape([NUM_SAMPLE_POINTS, 3]) + label_cloud_batch.set_shape([NUM_SAMPLE_POINTS, len(LABELS) + 1]) + return point_cloud_batch, label_cloud_batch + + +def augment(point_cloud_batch, label_cloud_batch): + noise = tf.random.uniform( + tf.shape(label_cloud_batch), -0.005, 0.005, dtype=tf.float64 + ) + point_cloud_batch += noise[:, :, :3] + return point_cloud_batch, label_cloud_batch + + +def generate_dataset(point_clouds, label_clouds, is_training=True): + dataset = tf.data.Dataset.from_tensor_slices((point_clouds, label_clouds)) + dataset = dataset.shuffle(BATCH_SIZE * 100) if is_training else dataset + dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE) + dataset = dataset.batch(batch_size=BATCH_SIZE, drop_remainder=args.drop_remainder) + dataset = ( + dataset.map(augment, num_parallel_calls=tf.data.AUTOTUNE) + if is_training + else dataset + ) + return dataset + + +split_index = int(len(point_clouds) * (1 - VAL_SPLIT)) +train_point_clouds = point_clouds[:split_index] +train_label_cloud = point_cloud_labels[:split_index] +total_training_examples = len(train_point_clouds) + +val_point_clouds = point_clouds[split_index:] +val_label_cloud = point_cloud_labels[split_index:] + +print("Num train point clouds:", len(train_point_clouds)) +print("Num train point cloud labels:", len(train_label_cloud)) +print("Num val point clouds:", len(val_point_clouds)) +print("Num val point cloud labels:", len(val_label_cloud)) + +train_dataset = generate_dataset(train_point_clouds, train_label_cloud) +val_dataset = generate_dataset(val_point_clouds, val_label_cloud, is_training=False) + +print("Train Dataset:", train_dataset) +print("Validation Dataset:", val_dataset) + +""" +## PointNet model + +The figure below depicts the internals of the PointNet model family: + +![](https://i.imgur.com/qFLNw5L.png) + +Given that PointNet is meant to consume an ***unordered set*** of coordinates as its input data, +its architecture needs to match the following characteristic properties +of point cloud data: + +### Permutation invariance + +Given the unstructured nature of point cloud data, a scan made up of `n` points has `n!` +permutations. The subsequent data processing must be invariant to the different +representations. In order to make PointNet invariant to input permutations, we use a +symmetric function (such as max-pooling) once the `n` input points are mapped to +higher-dimensional space. The result is a **global feature vector** that aims to capture +an aggregate signature of the `n` input points. The global feature vector is used alongside +local point features for segmentation. + +![](https://i.imgur.com/0mrvvjb.png) + +### Transformation invariance + +Segmentation outputs should be unchanged if the object undergoes certain transformations, +such as translation or scaling. For a given input point cloud, we apply an appropriate +rigid or affine transformation to achieve pose normalization. Because each of the `n` input +points are represented as a vector and are mapped to the embedding spaces independently, +applying a geometric transformation simply amounts to matrix multiplying each point with +a transformation matrix. This is motivated by the concept of +[Spatial Transformer Networks](https://arxiv.org/abs/1506.02025). + +The operations comprising the T-Net are motivated by the higher-level architecture of +PointNet. 
+PointNet. MLPs (or fully-connected layers) are used to map the input points independently
+and identically to a higher-dimensional space; max-pooling is used to encode a global
+feature vector whose dimensionality is then reduced with fully-connected layers. The
+input-dependent features at the final fully-connected layer are then combined with
+globally trainable weights and biases, resulting in a 3-by-3 transformation matrix.
+
+![](https://i.imgur.com/aEj3GYi.png)
+
+### Point interactions
+
+The interaction between neighboring points often carries useful information (i.e., a
+single point should not be treated in isolation). Whereas classification need only make
+use of global features, segmentation must be able to leverage local point features along
+with global point features.
+
+
+**Note**: The figures presented in this section have been taken from the
+[original paper](https://arxiv.org/abs/1612.00593).
+"""
+
+"""
+Now that we know the pieces that compose the PointNet model, we can implement the model.
+We start by implementing the basic blocks, i.e., the convolutional block and the
+multi-layer perceptron block.
+"""
+
+
+def conv_block(x: tf.Tensor, filters: int, name: str) -> tf.Tensor:
+    x = layers.Conv1D(filters, kernel_size=1, padding="valid", name=f"{name}_conv")(x)
+    x = layers.BatchNormalization(momentum=0.0, name=f"{name}_batch_norm")(x)
+    return layers.Activation("relu", name=f"{name}_relu")(x)
+
+
+def mlp_block(x: tf.Tensor, filters: int, name: str) -> tf.Tensor:
+    x = layers.Dense(filters, name=f"{name}_dense")(x)
+    x = layers.BatchNormalization(momentum=0.0, name=f"{name}_batch_norm")(x)
+    return layers.Activation("relu", name=f"{name}_relu")(x)
+
+
+"""
+We implement a regularizer (taken from
+[this example](https://keras.io/examples/vision/pointnet/#build-a-model))
+to enforce orthogonality in the feature space. This is needed to ensure
+that the magnitudes of the transformed features do not vary too much.
+"""
+
+
+class OrthogonalRegularizer(keras.regularizers.Regularizer):
+    """Reference: https://keras.io/examples/vision/pointnet/#build-a-model"""
+
+    def __init__(self, num_features, l2reg=0.001):
+        self.num_features = num_features
+        self.l2reg = l2reg
+        self.identity = tf.eye(num_features)
+
+    def __call__(self, x):
+        x = tf.reshape(x, (-1, self.num_features, self.num_features))
+        xxt = tf.tensordot(x, x, axes=(2, 2))
+        xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))
+        return tf.reduce_sum(self.l2reg * tf.square(xxt - self.identity))
+
+    def get_config(self):
+        # The base `Regularizer` class does not provide a usable `get_config`,
+        # so return the constructor arguments directly (keys match `__init__`).
+        return {"num_features": self.num_features, "l2reg": self.l2reg}
+
+
+"""
+The next piece is the transformation network which we explained earlier.
+"""
+
+
+def transformation_net(inputs: tf.Tensor, num_features: int, name: str) -> tf.Tensor:
+    """
+    Reference: https://keras.io/examples/vision/pointnet/#build-a-model.
+
+    The `filters` values come from the original paper:
+    https://arxiv.org/abs/1612.00593.
+    """
+    x = conv_block(inputs, filters=64, name=f"{name}_1")
+    x = conv_block(x, filters=128, name=f"{name}_2")
+    x = conv_block(x, filters=1024, name=f"{name}_3")
+    x = layers.GlobalMaxPooling1D()(x)
+    x = mlp_block(x, filters=512, name=f"{name}_1_1")
+    x = mlp_block(x, filters=256, name=f"{name}_2_1")
+    return layers.Dense(
+        num_features * num_features,
+        kernel_initializer="zeros",
+        bias_initializer=keras.initializers.Constant(np.eye(num_features).flatten()),
+        activity_regularizer=OrthogonalRegularizer(num_features),
+        name=f"{name}_final",
+    )(x)
+
+
+def transformation_block(inputs: tf.Tensor, num_features: int, name: str) -> tf.Tensor:
+    transformed_features = transformation_net(inputs, num_features, name=name)
+    transformed_features = layers.Reshape((num_features, num_features))(
+        transformed_features
+    )
+    return layers.Dot(axes=(2, 1), name=f"{name}_mm")([inputs, transformed_features])
+
+
+"""
+Finally, we piece the above blocks together and implement the segmentation model.
+"""
+
+
+def get_shape_segmentation_model(num_points: int, num_classes: int) -> keras.Model:
+    input_points = keras.Input(shape=(None, 3))
+
+    # PointNet Classification Network.
+    transformed_inputs = transformation_block(
+        input_points, num_features=3, name="input_transformation_block"
+    )
+    features_64 = conv_block(transformed_inputs, filters=64, name="features_64")
+    features_128_1 = conv_block(features_64, filters=128, name="features_128_1")
+    features_128_2 = conv_block(features_128_1, filters=128, name="features_128_2")
+    transformed_features = transformation_block(
+        features_128_2, num_features=128, name="transformed_features"
+    )
+    features_512 = conv_block(transformed_features, filters=512, name="features_512")
+    features_2048 = conv_block(features_512, filters=2048, name="pre_maxpool_block")
+    global_features = layers.MaxPool1D(pool_size=num_points, name="global_features")(
+        features_2048
+    )
+    # Tile the global feature vector so it can be concatenated with the
+    # per-point features below.
+    global_features = tf.tile(global_features, [1, num_points, 1])
+
+    # Segmentation head.
+    segmentation_input = layers.Concatenate(name="segmentation_input")(
+        [
+            features_64,
+            features_128_1,
+            features_128_2,
+            transformed_features,
+            features_512,
+            global_features,
+        ]
+    )
+    segmentation_features = conv_block(
+        segmentation_input, filters=128, name="segmentation_features"
+    )
+    outputs = layers.Conv1D(
+        num_classes, kernel_size=1, activation="softmax", name="segmentation_head"
+    )(segmentation_features)
+    return keras.Model(input_points, outputs)
+
+
+"""
+## Instantiate the model
+"""
+
+x, y = next(iter(train_dataset))
+
+num_points = x.shape[1]
+num_classes = y.shape[-1]
+
+segmentation_model = get_shape_segmentation_model(num_points, num_classes)
+segmentation_model.summary()
+
+"""
+## Training
+
+For training, the authors recommend a learning rate schedule that halves the initial
+learning rate every 20 epochs. In this example, we decay it every 15 epochs.
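+
+In case `PiecewiseConstantDecay` is unfamiliar, here is a minimal, self-contained
+sketch of its semantics (the boundary and value numbers below are illustrative only,
+not the actual `INITIAL_LR` used in this script):
+
+```python
+import tensorflow as tf
+
+# Returns values[0] for steps <= 100, values[1] for steps <= 200, values[2] after.
+demo_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
+    boundaries=[100, 200], values=[3e-3, 1.5e-3, 7.5e-4]
+)
+print(demo_schedule(50).numpy(), demo_schedule(150).numpy(), demo_schedule(250).numpy())
+# 0.003 0.0015 0.00075
+```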
+""" + +training_step_size = total_training_examples // BATCH_SIZE +total_training_steps = training_step_size * EPOCHS +print(f"Total training steps: {total_training_steps}.") + +lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay( + boundaries=[training_step_size * 15, training_step_size * 15], + values=[INITIAL_LR, INITIAL_LR * 0.5, INITIAL_LR * 0.25], +) + +steps = tf.range(total_training_steps, dtype=tf.int32) +lrs = [lr_schedule(step) for step in steps] + +# plt.plot(lrs) +# plt.xlabel("Steps") +# plt.ylabel("Learning Rate") +# plt.show() + +""" +Finally, we implement a utility for running our experiments and launch model training. +""" + +def run_experiment(epochs): + + segmentation_model = get_shape_segmentation_model(num_points, num_classes) + segmentation_model.compile( + optimizer=keras.optimizers.Adam(learning_rate=lr_schedule), + loss=keras.losses.CategoricalCrossentropy(), + metrics=["accuracy"], + ) + + checkpoint_filepath = "/tmp/checkpoint" + checkpoint_callback = keras.callbacks.ModelCheckpoint( + checkpoint_filepath, + monitor="val_loss", + save_best_only=True, + save_weights_only=True, + ) + + history = segmentation_model.fit( + train_dataset, + validation_data=val_dataset, + epochs=epochs, + callbacks=[checkpoint_callback], + ) + segmentation_model.save_weights(filepath="pointnet_segmentation", save_format="tf") + + segmentation_model.load_weights(checkpoint_filepath) + + return segmentation_model, history + + +segmentation_model, history = run_experiment(epochs=EPOCHS) + +""" +## Visualize the training landscape +""" + + +# def plot_result(item): + # plt.plot(history.history[item], label=item) + # plt.plot(history.history["val_" + item], label="val_" + item) + # plt.xlabel("Epochs") + # plt.ylabel(item) + # plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14) + # plt.legend() + # plt.grid() + # plt.show() + + +# plot_result("loss") +# plot_result("accuracy") + +""" +## Inference +""" + +""" +validation_batch = next(iter(val_dataset)) +val_predictions = segmentation_model.predict(validation_batch[0]) +print(f"Validation prediction shape: {val_predictions.shape}") + + +def visualize_single_point_cloud(point_clouds, label_clouds, idx): + label_map = LABELS + ["none"] + point_cloud = point_clouds[idx] + label_cloud = label_clouds[idx] + visualize_data(point_cloud, [label_map[np.argmax(label)] for label in label_cloud]) + + +idx = np.random.choice(len(validation_batch[0])) +print(f"Index selected: {idx}") + +# Plotting with ground-truth. +visualize_single_point_cloud(validation_batch[0], validation_batch[1], idx) + +# Plotting with predicted labels. +visualize_single_point_cloud(validation_batch[0], val_predictions, idx) +""" + +""" +## Final notes + +If you are interested in learning more about this topic, you may find +[this repository](https://github.com/soumik12345/point-cloud-segmentation) +useful. 
+""" diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..7c84b7d20 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,104 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="pointnet_segmentation_ID2532_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=60 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 pointnet_segmentation.py --data_path=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --drop_remainder=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep "ms/step" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==1' | awk -F " " '{print$5}' | tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_accuracy:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $17}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh new file mode 100644 index 000000000..bd71305f9 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_dynamic_eval.sh @@ -0,0 +1,115 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="pointnet_segmentation_ID2532_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 pointnet_segmentation.py --data_path=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --drop_remainder=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +##echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +##结果打印,不需要修改 +#echo "------------------ Final result ------------------" +##输出性能FPS,需要模型审视修改 +#TrainingTime=`grep 1875/1875 $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $3}'|awk 'NR==2'|tr -cd "[0-9]"` +#wait +#FPS=`awk 'BEGIN{printf "%.2f\n",'1875'*'${batch_size}'/'${TrainingTime}'}'` +##打印,不需要修改 +#echo "Final Performance images/sec : $FPS" + +##输出训练精度,需要模型审视修改 +#train_accuracy=`grep sparse_categorical_accuracy $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |awk 'END {print $NF}'` +##打印,不需要修改 +#echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +##吞吐量 +#ActualFPS=${FPS} +##单迭代训练时长 +#TrainingTime=${TrainingTime} + +##从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +#grep student_loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $9}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +##最后一个迭代loss值,不需要修改 +#ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +ModelStatus="图执行FAIL" +DTS_Number="DTS2021090622224" +# error_msg="E19999" +error_msg="EZ3002: Optype 
\[Conv2DBackpropFilter\] of Ops kernel \[AIcoreEngine\] is unsupported" +Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +# error_msg="Graph engine process graph failed: E19999: Inner Error! Output shape is still unknown after shape inference. shape = [-1]." + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh new file mode 100644 index 000000000..33d80ab04 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/pointnet_segmentation_ID2532_for_TensorFlow2.X/test/train_performance_1p_static_eval.sh @@ -0,0 +1,104 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="pointnet_segmentation_ID2532_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=1 +#训练step +train_steps=60000 +#学习率 +#learning_rate=1e-5 + +#参数配置 +data_path="" + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path \" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path + +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 pointnet_segmentation.py --data_path=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --drop_remainder=True > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait + +end=$(date +%s) +e2e_time=$(( $end - $start )) + +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep "ms/step" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'NR==1' | 
awk -F " " '{print$5}' | tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_accuracy:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $17}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From 1ae78872889af5f95ccc93e79f46208a1489dd6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:20:33 +0000 Subject: [PATCH 46/54] =?UTF-8?q?siamese=5Fcontrastive=5FID2538=5Ffor=5FTe?= =?UTF-8?q?nsorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 21 + .../README.md | 191 ++++++++ .../configs/ops_info.json | 7 + .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../siamese_contrastive.py | 412 ++++++++++++++++++ .../test/train_full_1p.sh | 195 +++++++++ .../test/train_performance_1p.sh | 228 ++++++++++ 8 files changed, 1057 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/configs/ops_info.json create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/siamese_contrastive.py create mode 100644 TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 
TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_performance_1p.sh
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/LICENSE
new file mode 100644
index 000000000..51d555a15
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Ke YU
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..b9a418b2b
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/README.md
@@ -0,0 +1,191 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Preparation](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+

Basic Information

+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.4.8**
+
+**Size: 35KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Similarity-learning training code, based on the TensorFlow framework, for a Siamese network trained with a contrastive loss**
+
+

Overview

+
+## Summary
+
+A Siamese network is a neural network that shares weights between two or more sister networks, each of which produces an embedding vector for its respective input. In supervised similarity learning, the networks are trained to maximize the contrast (distance) between embeddings of inputs from different classes while minimizing the distance between embeddings of similar classes, so that the embedding space reflects the class structure of the training inputs.
+
+- Reference paper:
+
+    skip
+
+- Reference implementation:
+
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/siamese_contrastive.py
+
+- Implementation adapted to the Ascend AI processor:
+
+    skip
+
+- To obtain the code at the corresponding commit\_id via Git:
+
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the model's code directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # switch to the model code path; not needed if the repository contains only this model
+
+
+## Default configuration
+
+- Network structure:
+  - 6 layers
+  - 5324 total params
+
+- Training hyperparameters (single card):
+  - Batch size: 16
+  - Dtype: float32
+  - Margin: 1
+  - Train epoch: 10
+
+
+## Supported features
+
+| Feature | Supported |
+| ---------- | -------- |
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+
+## Mixed-precision training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers some float32 operators in the network to float16 precision, which improves system performance and reduces memory usage with very little loss of accuracy.
+
+## Enabling mixed precision
+
+Pass --precision_mode='allow_mix_precision' to the launch script.
+
+```
+ ./train_performance_1p.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+```
+
+Relevant code sample (from siamese_contrastive.py in this directory):
+
+```
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='precision mode: allow_fp32_to_fp16/force_fp16/'
+                         'must_keep_origin_dtype/allow_mix_precision')
+
+npu_device.global_options().precision_mode = args.precision_mode
+```
+
+
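+For example, to launch a performance run from the test directory with mixed precision explicitly enabled (the flags are the ones listed above):
+
+```
+bash train_performance_1p.sh --data_path=/home --precision_mode='allow_mix_precision'
+```
+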

Training Environment Preparation

+
+- For hardware and runtime environment preparation, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+

Quick Start

+
+## Dataset preparation
+
+1. Prepare the dataset yourself; this network uses the MNIST dataset.
+
+The dataset directory is laid out as follows:
+
+```
+├──mnist.npz
+```
+
+
+
+## Model training
+
+- Click "Download Now" and choose a suitable method to download the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required by the program.
+
+       For the environment variable settings, see:
+
+       [Environment variable settings for the Ascend 910 training platform](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-card training
+
+       2.1 Single-card training command (the script is located at siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_full_1p.sh). First cd into the test directory, then launch training with the command below. Make sure to change "--data_path" in the example to your own data path; here mnist.npz is placed in the home directory.
+
+        bash train_full_1p.sh --data_path=/home
+
+
+
+
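+Once launched, training runs in the background and its console output is redirected to a log file; you can follow it from the test directory (the path comes from the launch scripts in this repository):
+
+```
+tail -f output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log
+```
+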

Advanced Reference

+
+## Script and sample code
+
+```
+|--LICENSE
+|--README.md                                                   #documentation
+|--siamese_contrastive.py                                      #training code
+|--requirements.txt                                            #required dependencies
+|--test                                                        #training script directory
+|    |--train_full_1p.sh                                       #full training script
+|    |--train_performance_1p.sh                                #performance training script
+```
+
+## Script parameters
+
+```
+--data_path              # the path to train data
+--batch_size             # batch size
+--epochs                 # epochs of training
+--log_steps              # TimeHistory log step interval
+--precision_mode         # precision mode
+--over_dump              # if or not over detection, default is False
+--data_dump_flag         # data dump flag, default is False
+--data_dump_step         # data dump step, default is 10
+--profiling              # if or not profiling for performance debug, default is False
+--profiling_dump_path    # the path to save profiling data
+--over_dump_path         # the path to save over dump data
+--data_dump_path         # the path to save dump data
+--use_mixlist            # use_mixlist flag, default is False
+--fusion_off_flag        # fusion_off flag, default is False
+--mixlist_file           # mixlist file name, default is ops_info.json
+--fusion_off_file        # fusion_off file name, default is fusion_switch.cfg
+```
+
+## Training process
+
+Launch single-card or multi-card training via the commands under "Model training". Single-card and multi-card training are run with different scripts; single-card and 8-card network training are supported. The model is stored under ${cur_path}/output/${ASCEND_DEVICE_ID}, including the training log and checkpoint files. Taking 8-card training as an example, the loss information is in the file ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/configs/ops_info.json b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/configs/ops_info.json
new file mode 100644
index 000000000..9e13711c6
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/configs/ops_info.json
@@ -0,0 +1,7 @@
+{
+    "black-list":{
+        "to-add":[
+            "SqrtGrad"
+        ]
+    }
+}
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..41666def8
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:PERFECT
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/siamese_contrastive.py b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/siamese_contrastive.py
new file mode 100644
index 000000000..8ed7493ac
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/siamese_contrastive.py
@@ -0,0 +1,412 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Title: Image similarity estimation using a Siamese Network with a contrastive loss
+Author: Mehdi
+Date created: 2021/05/06
+Last modified: 2021/05/06
+Description: Similarity learning using a siamese network trained with a contrastive loss.
+"""
+
+"""
+## Introduction
+
+[Siamese Networks](https://en.wikipedia.org/wiki/Siamese_neural_network)
+are neural networks which share weights between two or more sister networks,
+each producing embedding vectors of its respective inputs.
+
+In supervised similarity learning, the networks are then trained to maximize the
+contrast (distance) between embeddings of inputs of different classes, while minimizing the distance between
+embeddings of similar classes, resulting in embedding spaces that reflect
+the class segmentation of the training inputs.
+"""
+
+import npu_device
+import argparse
+import ast
+#===============================NPU Migration=========================================
+parser = argparse.ArgumentParser()
+parser.add_argument('--batch_size', type=int, default=16, help='batch size')
+parser.add_argument('--epochs', type=int, default=10, help='epochs to train')
+parser.add_argument('--log_steps', type=int, default=3750, help='steps per log')
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='precision mode: allow_fp32_to_fp16/force_fp16/'
+                         'must_keep_origin_dtype/allow_mix_precision')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                    help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                    help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+                    help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                    help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+                    help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+args = parser.parse_args()
+
+def npu_config():
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        npu_device.global_options().profiling_config.profiling_options = profiling_options
+    npu_device.global_options().precision_mode = args.precision_mode
+    if args.use_mixlist and args.precision_mode=='allow_mix_precision':
+        npu_device.global_options().modify_mixlist=args.mixlist_file
+    if args.fusion_off_flag:
+        npu_device.global_options().fusion_switch_file=args.fusion_off_file
+    npu_device.open().as_default()
+#===============================NPU Migration=========================================
+npu_config()
+
+"""
+## Setup
+"""
+
+import time
+import random
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+import matplotlib.pyplot as plt
+
+"""
+## Hyperparameters
+"""
+
+
+epochs = args.epochs
+batch_size = args.batch_size
+margin = 1  # Margin for contrastive loss.
+
+"""
+## Load the MNIST dataset
+"""
+
+(x_train_val, y_train_val), (x_test, y_test) = keras.datasets.mnist.load_data()
+
+# Change the data type to a floating point format
+x_train_val = x_train_val.astype("float32")
+x_test = x_test.astype("float32")
+
+
+"""
+## Define training and validation sets
+"""
+
+# Keep 50% of train_val in validation set
+x_train, x_val = x_train_val[:30000], x_train_val[30000:]
+y_train, y_val = y_train_val[:30000], y_train_val[30000:]
+del x_train_val, y_train_val
+
+
+"""
+## Create pairs of images
+
+We will train the model to differentiate between digits of different classes. For
+example, digit `0` needs to be differentiated from the rest of the
+digits (`1` through `9`), digit `1` - from `0` and `2` through `9`, and so on.
+To carry this out, we will select N random images from class A (for example,
+for digit `0`) and pair them with N random images from another class B
+(for example, for digit `1`). Then, we can repeat this process for all classes
+of digits (until digit `9`). Once we have paired digit `0` with other digits,
+we can repeat this process for the remaining classes for the rest of the digits
+(from `1` until `9`).
+"""
+
+
+def make_pairs(x, y):
+    """Creates a tuple containing image pairs with corresponding label.
+
+    Arguments:
+        x: List containing images, each index in this list corresponds to one image.
+        y: List containing labels, each label with datatype of `int`.
+
+    Returns:
+        Tuple containing two numpy arrays as (pairs_of_samples, labels),
+        where pairs_of_samples' shape is (2 * len(x), 2, n_features_dims) and
+        labels is a binary array of shape (2 * len(x),).
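+
+    Example (illustrative, for MNIST-shaped inputs):
+        pairs, labels = make_pairs(x_train, y_train)
+        # pairs.shape  == (2 * len(x_train), 2, 28, 28)
+        # labels.shape == (2 * len(x_train),)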
+    """
+
+    num_classes = max(y) + 1
+    digit_indices = [np.where(y == i)[0] for i in range(num_classes)]
+
+    pairs = []
+    labels = []
+
+    for idx1 in range(len(x)):
+        # add a matching example
+        x1 = x[idx1]
+        label1 = y[idx1]
+        idx2 = random.choice(digit_indices[label1])
+        x2 = x[idx2]
+
+        pairs += [[x1, x2]]
+        labels += [1]
+
+        # add a non-matching example
+        label2 = random.randint(0, num_classes - 1)
+        while label2 == label1:
+            label2 = random.randint(0, num_classes - 1)
+
+        idx2 = random.choice(digit_indices[label2])
+        x2 = x[idx2]
+
+        pairs += [[x1, x2]]
+        labels += [0]
+
+    return np.array(pairs), np.array(labels).astype("float32")
+
+
+# make train pairs
+pairs_train, labels_train = make_pairs(x_train, y_train)
+
+# make validation pairs
+pairs_val, labels_val = make_pairs(x_val, y_val)
+
+# make test pairs
+pairs_test, labels_test = make_pairs(x_test, y_test)
+
+"""
+We get:
+
+**pairs_train.shape = (60000, 2, 28, 28)**
+
+- We have 60,000 pairs
+- Each pair contains 2 images
+- Each image has shape `(28, 28)`
+"""
+
+"""
+Split the training pairs
+"""
+
+x_train_1 = pairs_train[:, 0]  # x_train_1.shape is (60000, 28, 28)
+x_train_2 = pairs_train[:, 1]
+
+"""
+Split the validation pairs
+"""
+
+x_val_1 = pairs_val[:, 0]  # x_val_1.shape = (60000, 28, 28)
+x_val_2 = pairs_val[:, 1]
+
+"""
+Split the test pairs
+"""
+
+x_test_1 = pairs_test[:, 0]  # x_test_1.shape = (20000, 28, 28)
+x_test_2 = pairs_test[:, 1]
+
+
+"""
+## Define the model
+
+There are two input layers, each leading to its own network, which
+produces embeddings. A `Lambda` layer then merges them using an
+[Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) and the
+merged output is fed to the final network.
+"""
+
+# Given two tensors t1 and t2
+# Euclidean distance = sqrt(sum(square(t1-t2)))
+def euclidean_distance(vects):
+    """Find the Euclidean distance between two vectors.
+
+    Arguments:
+        vects: List containing two tensors of same length.
+
+    Returns:
+        Tensor containing Euclidean distance
+        (as floating point value) between vectors.
+    """
+
+    x, y = vects
+    sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)
+    return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))
+
+
+input = layers.Input((28, 28, 1))
+x = tf.keras.layers.BatchNormalization()(input)
+x = layers.Conv2D(4, (5, 5), activation="tanh")(x)
+x = layers.AveragePooling2D(pool_size=(2, 2))(x)
+x = layers.Conv2D(16, (5, 5), activation="tanh")(x)
+x = layers.AveragePooling2D(pool_size=(2, 2))(x)
+x = layers.Flatten()(x)
+
+x = tf.keras.layers.BatchNormalization()(x)
+x = layers.Dense(10, activation="tanh")(x)
+embedding_network = keras.Model(input, x)
+
+
+input_1 = layers.Input((28, 28, 1))
+input_2 = layers.Input((28, 28, 1))
+
+# As mentioned above, Siamese networks share weights between
+# tower networks (sister networks). To allow this, we will use
+# the same embedding network for both tower networks.
+tower_1 = embedding_network(input_1)
+tower_2 = embedding_network(input_2)
+
+merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
+normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
+output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
+siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
+
+
+"""
+## Define the contrastive loss
+"""
+
+
+def loss(margin=1):
+    """Provides 'contrastive_loss' an enclosing scope with variable 'margin'.
+
+    Arguments:
+        margin: Integer, defines the baseline distance for which pairs
+                should be classified as dissimilar (default: 1).
+
+    Returns:
+        'contrastive_loss' function with data ('margin') attached.
+    """
+
+    # Contrastive loss = mean( (1-true_value) * square(prediction) +
+    #                         true_value * square( max(margin-prediction, 0) ))
+    def contrastive_loss(y_true, y_pred):
+        """Calculates the contrastive loss.
+
+        Arguments:
+            y_true: List of labels, each label is of type float32.
+            y_pred: List of predictions of same length as of y_true,
+                    each label is of type float32.
+
+        Returns:
+            A tensor containing the contrastive loss as a floating point value.
+        """
+
+        square_pred = tf.math.square(y_pred)
+        margin_square = tf.math.square(tf.math.maximum(margin - (y_pred), 0))
+        return tf.math.reduce_mean(
+            (1 - y_true) * square_pred + (y_true) * margin_square
+        )
+
+    return contrastive_loss
+
+
+"""
+## Compile the model with the contrastive loss
+"""
+
+siamese.compile(loss=loss(margin=margin), optimizer="RMSprop", metrics=["accuracy"])
+
+"""
+## Add time history callbacks
+"""
+
+class TimeHistory(tf.keras.callbacks.Callback):
+    def __init__(self, batch_size, log_steps, initial_step=0):
+        self.batch_size = batch_size
+        super(TimeHistory, self).__init__()
+        self.steps_before_epoch = initial_step
+        self.last_log_step = initial_step
+        self.log_steps = log_steps
+        self.steps_in_epoch = 0
+        self.start_time = None
+
+    @property
+    def global_steps(self):
+        """The current 1-indexed global step."""
+        return self.steps_before_epoch + self.steps_in_epoch
+
+    def on_epoch_begin(self, epoch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+        self.epoch_start = time.time()
+
+    def on_batch_begin(self, batch, logs=None):
+        if not self.start_time:
+            self.start_time = time.time()
+
+    def on_batch_end(self, batch, logs=None):
+        self.steps_in_epoch = batch + 1
+        steps_since_last_log = self.global_steps - self.last_log_step
+        if steps_since_last_log >= self.log_steps:
+            now = time.time()
+            elapsed_time = now - self.start_time
+            steps_per_second = steps_since_last_log / elapsed_time
+            examples_per_second = steps_per_second * self.batch_size
+            print(
+                'TimeHistory: %.2f seconds, %.2f examples/second between steps %d '
+                'and %d'%(elapsed_time, examples_per_second, self.last_log_step,
+                self.global_steps), flush=True)
+            self.last_log_step = self.global_steps
+            self.start_time = None
+
+    def on_epoch_end(self, epoch, logs=None):
+        epoch_run_time = time.time() - self.epoch_start
+        self.steps_before_epoch += self.steps_in_epoch
+        self.steps_in_epoch = 0
+
+"""
+## Train the model
+"""
+
+history = siamese.fit(
+    [x_train_1, x_train_2],
+    labels_train,
+    validation_data=([x_val_1, x_val_2], labels_val),
+    batch_size=batch_size,
+    epochs=epochs,
+    verbose=2,
+    callbacks=[TimeHistory(args.batch_size, args.log_steps)],
+)
diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..f0318f561
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,195 @@
+#!/bin/bash
+
+#当前路径,不需要修改
+cur_path=`pwd`
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+#集合通信参数,不需要修改
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# 数据集路径,保持为空,不需要修改
+data_path=""
+
+#基础参数,需要模型审视修改
+#网络名称,同目录名称
+Network="siamese_contrastive_ID2538_for_TensorFlow2.X"
+#训练epoch +train_epochs=10 +#训练batch_size +batch_size=16 + + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=True +mixlist_file="../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="../configs/fusion_switch.cfg" +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_full_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done +############维测参数############## + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 siamese_contrastive.py \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) 
+ +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 + +# Time=`grep "ms/step" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n 2 | grep '3750/3750' |awk -F' ' '{print $5'| awk -F'm' '{print $1}'` +# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'` +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +# train_accuracy=`grep "test acc" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F',' '{print $3}' | awk -F']' '{print $1}'` +train_accuracy=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $15}'|awk 'END {print}'` + +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#稳定性精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据 +#吞吐量,不需要修改 +ActualFPS=${FPS} +#单迭代训练时长,不需要修改 +# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +# cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tr -d '\b\r' | grep -Eo " loss: [0-9]*\.[0-9]*" | awk -F" " '{print $2}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +# ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +ActualLoss=`awk '{print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt|tail -n 1` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..5b830dc1c --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/siamese_contrastive_ID2538_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,228 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd` +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#集合通信参数,不需要修改 + +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=0 + +# 
数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="siamese_contrastive_ID2538_for_TensorFlow2.X" +#训练epoch +train_epochs=10 +#训练batch_size +batch_size=16 + +# #维测参数,precision_mode需要模型审视修改 +# precision_mode="allow_mix_precision" +# #维持参数,以下不需要修改 +# over_dump=False +# data_dump_flag=False +# data_dump_step="10" +# profiling=False +# autotune=False + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="../configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="../configs/fusion_switch.cfg" +############维测参数############## + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +# #参数校验,不需要修改 +# for para in $* +# do +# if [[ $para == --precision_mode* ]];then +# precision_mode=`echo ${para#*=}` +# elif [[ $para == --over_dump* ]];then +# over_dump=`echo ${para#*=}` +# over_dump_path=${cur_path}/output/overflow_dump +# mkdir -p ${over_dump_path} +# elif [[ $para == --data_dump_flag* ]];then +# data_dump_flag=`echo ${para#*=}` +# data_dump_path=${cur_path}/output/data_dump +# mkdir -p ${data_dump_path} +# elif [[ $para == --data_dump_step* ]];then +# data_dump_step=`echo ${para#*=}` +# elif [[ $para == --profiling* ]];then +# profiling=`echo ${para#*=}` +# profiling_dump_path=${cur_path}/output/profiling +# mkdir -p ${profiling_dump_path} +# elif [[ $para == --data_path* ]];then +# data_path=`echo ${para#*=}` +# fi +# done + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done +############维测参数############## + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +cp $data_path/mnist.npz /root/.keras/datasets/ + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/../ + 
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + + + #创建DeviceID输出目录,不需要修改 + if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then + rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID} + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + else + mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt + fi + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 siamese_contrastive.py \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 + +# Time=`grep "ms/step" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n 2 | grep '3750/3750' | awk -F' ' '{print $5}' | awk -F'm' '{print $1}'` +# FPS=`awk 'BEGIN{printf "%.2f\n",'1000'*'${batch_size}'/'${Time}'}'` +single_batch_step_sec=`grep TimeHistory $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| tail -n +3|awk '{sum+=$4} END {print sum/NR}'` +FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'` + +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +# train_accuracy=`grep 'test acc' $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk -F',' '{print $3}' |awk -F']' '{print $1}'` +train_accuracy=`cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $15}'|awk 'END {print}'` + +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" +echo "E2E Training Duration sec : $e2e_time" + +#稳定性精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +##获取性能数据 +#吞吐量,不需要修改 +ActualFPS=${FPS} +#单迭代训练时长,不需要修改 +# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*1000/'${FPS}'}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +# cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tr -d '\b\r' | grep -Eo " loss: [0-9]*\.[0-9]*" | awk -F" " '{print $2}'>> $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +# ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo 
"DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From f52d3a9274678c8a44cca647d9e6101dfdbe3d85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:20:56 +0000 Subject: [PATCH 47/54] =?UTF-8?q?siamese=5Fnetwork=5FID2539=5Ffor=5FTensor?= =?UTF-8?q?Flow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 21 + .../README.md | 194 +++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../siamese_network.py | 509 ++++++++++++++++++ .../test/train_full_1p.sh | 164 ++++++ .../test/train_performance_1p_dynamic.sh | 162 ++++++ .../test/train_performance_1p_static.sh | 165 ++++++ 8 files changed, 1218 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/siamese_network.py create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh create mode 100644 TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_static.sh diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..51d555a15 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Ke YU + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..6fa4959ba
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/README.md
@@ -0,0 +1,194 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Preparation](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Image Classification**
+
+**Version: 1.1**
+
+**Modified: 2022.4.11**
+
+**Size: 42KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code, based on TensorFlow, for a Siamese network that compares image similarity using a triplet loss function**
+
+<h2 id="概述.md">Overview</h2>
+
+## Summary
+
+A Siamese network is a neural network that shares weights between two or more sister networks, each of which produces an embedding vector for its own input. This example uses a Siamese network with three identical subnetworks. We feed the model three images, two of which are similar (the anchor and positive samples), while the third is unrelated (the negative sample).
+This example uses a triplet loss function and the Totally Looks Like dataset.
+
+- Reference paper:
+
+    skip
+
+- Reference implementation:
+
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/siamese_network.py
+
+- Implementation adapted for the Ascend AI processor:
+
+    skip
+
+- To fetch the code at the corresponding commit_id via Git:
+
+        git clone {repository_url}        # clone the repository
+        cd {repository_name}              # enter the model's code directory
+        git checkout {branch}             # switch to the corresponding branch
+        git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+        cd {code_path}                    # enter the model code path; skip this step if the repo contains only this model
+
+
+## Default configuration
+
+- Network structure:
+    - 11 layers
+    - 75168640 total params
+
+- Training hyperparameters (single card):
+    - Batch size: 32
+    - Train epochs: 10
+
+
+## Supported features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed precision training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators in the network to float16, improving system performance and reducing memory usage with very little loss of precision.
+
+## Enabling mixed precision
+
+Pass --precision_mode='allow_mix_precision' to the launch script.
+
+```
+ ./train_performance_1p_16bs.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+```
+
+Related code example:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+                    help='allow_fp32_to_fp16/force_fp16/ '
+                         'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
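+For example, a typical launch with mixed precision enabled might look like the following (the data path is an illustrative placeholder):
+
+```
+cd test
+bash train_full_1p.sh --data_path=/home --precision_mode=allow_mix_precision
+```
+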
+<h2 id="训练环境准备.md">Training Environment Preparation</h2>
+
+- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Prepare the dataset yourself; this network uses the Totally Looks Like dataset.
+
+The dataset directory is laid out as follows:
+
+```
+├──totally_looks_like
+│   ├──left
+│   ├──model
+│   │   ├──resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
+│   ├──right
+```
+
+
+
+## Model training
+
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required by the program (a minimal example is shown at the end of this section).
+
+       For details on the environment variable configuration, see:
+
+       [Environment variable settings for the Ascend 910 training platform](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-card training
+
+       2.1 Single-card training command (the script is at siamese_network_ID2539_for_TensorFlow2.X/test/train_full_1p.sh). First cd into the test directory, then launch training with the command below. Make sure "--data_path" in the example is changed to your own data path; here the data folder is placed under the home directory.
+
+        bash train_full_1p.sh --data_path=/home
+
+
+
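+For reference, a single-card run typically exports at least the following variables, which the test scripts read (values are illustrative; see the wiki above for the authoritative list):
+
+```
+export ASCEND_DEVICE_ID=0
+export RANK_SIZE=1
+```
+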
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+
+```
+|--LICENSE
+|--README.md                              # documentation
+|--siamese_network.py                     # training code
+|--requirements.txt                       # required dependencies
+|--test                                   # training script directory
+|	|--train_full_1p.sh                    # full training script
+|	|--train_performance_1p_dynamic.sh     # performance training script (dynamic shape)
+|	|--train_performance_1p_static.sh      # performance training script (static shape)
+```
+
+## Script parameters
+
+```
+--data_path	# the path to train data
+--epochs	# epochs of training
+--static_shape  # static_shape, default is False
+--precision_mode	# precision mode, default is allow_mix_precision
+--over_dump		# if or not over detection, default is False
+--data_dump_flag	# data dump flag, default is False
+--data_dump_step	# data dump step, default is 10
+--profiling		# if or not profiling for performance debug, default is False
+--profiling_dump_path	# the path to save profiling data
+--over_dump_path	# the path to save over dump data
+--data_dump_path	# the path to save dump data
+--use_mixlist		# use_mixlist flag, default is False
+--fusion_off_flag	# fusion_off flag, default is False
+--mixlist_file		# mixlist file name, default is ops_info.json
+--fusion_off_file	# fusion_off file name, default is fusion_switch.cfg
+```
+
+## Training process
+
+Launch single-card or multi-card training with the commands given under "Model training". Single-card and multi-card runs use different scripts; single-card and 8-card network training are supported. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training logs and checkpoint files. Taking 8-card training as an example, the loss information is in the file ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..9f9b36084
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:PERFECT
+PrecisionStatus:OK
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/siamese_network.py b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/siamese_network.py
new file mode 100644
index 000000000..752f2e1cf
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/siamese_network.py
@@ -0,0 +1,509 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Image similarity estimation using a Siamese Network with a triplet loss
+Authors: [Hazem Essam](https://twitter.com/hazemessamm) and [Santiago L. Valdarrama](https://twitter.com/svpino)
+Date created: 2021/03/25
+Last modified: 2021/03/25
+Description: Training a Siamese Network to compare the similarity of images using a triplet loss function.
+"""
+
+"""
+## Introduction
+
+A [Siamese Network](https://en.wikipedia.org/wiki/Siamese_neural_network) is a type of network architecture that
+contains two or more identical subnetworks used to generate feature vectors for each input and compare them.
+
+Siamese Networks can be applied to different use cases, like detecting duplicates, finding anomalies, and face recognition.
+
+This example uses a Siamese Network with three identical subnetworks. We will provide three images to the model, where
+two of them will be similar (_anchor_ and _positive_ samples), and the third will be unrelated (a _negative_ example.)
+Our goal is for the model to learn to estimate the similarity between images.
+
+For the network to learn, we use a triplet loss function. You can find an introduction to triplet loss in the
+[FaceNet paper](https://arxiv.org/pdf/1503.03832.pdf) by Schroff et al., 2015. In this example, we define the triplet
+loss function as follows:
+
+`L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0)`
+
+This example uses the [Totally Looks Like dataset](https://sites.google.com/view/totally-looks-like-dataset)
+by [Rosenfeld et al., 2018](https://arxiv.org/pdf/1803.01485v3.pdf).
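+
+As a quick sanity check of that formula (the numbers are illustrative, not from
+the paper): with margin = 0.5, if ‖f(A) - f(P)‖² = 0.2 and ‖f(A) - f(N)‖² = 1.0,
+then L = max(0.2 - 1.0 + 0.5, 0) = 0, so a triplet that is already separated by
+more than the margin contributes no gradient.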
+""" + +""" +## Setup +""" +import npu_device +import matplotlib.pyplot as plt +import numpy as np +import os +import time +import ast +import random +import tensorflow as tf +from pathlib import Path +from tensorflow.keras import applications +from tensorflow.keras import layers +from tensorflow.keras import losses +from tensorflow.keras import optimizers +from tensorflow.keras import metrics +from tensorflow.keras import Model +from tensorflow.keras.applications import resnet +import argparse + +#===============================NPU Migration========================================= +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--data_path', default="./", + help="""directory to data""") + parser.add_argument('--batch_size', default=32, type=int, + help="""batch size for 1p""") + parser.add_argument('--epochs', default=10, type=int, + help="""epochs""") + parser.add_argument('--static_shape', type=ast.literal_eval, + help='static_shape, default is False') + parser.add_argument("--log_steps", default=50, type=int, + help="TimeHis log Step.") + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str,help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str,help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str,help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str,help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str,help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval,help='auto_tune flag, default is False') + args, unknown_args = parser.parse_known_args() + if len(unknown_args) > 0: + for bad_arg in unknown_args: + print("ERROR: Unknown command line arg: %s" % bad_arg) + raise ValueError("Invalid command line arg(s)") + return args + +args = parse_args() + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + 
+ if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode=='allow_mix_precision': + npu_device.global_options().modify_mixlist=args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file=args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode="RL,GA" + npu_device.open().as_default() +#===============================NPU Migration========================================= +npu_config() + +target_shape = (200, 200) + + +""" +## Load the dataset + +We are going to load the *Totally Looks Like* dataset and unzip it inside the `~/.keras` directory +in the local environment. + +The dataset consists of two separate files: + +* `left.zip` contains the images that we will use as the anchor. +* `right.zip` contains the images that we will use as the positive sample (an image that looks like the anchor). +""" + +cache_dir = Path(args.data_path) +anchor_images_path = cache_dir / "left" +positive_images_path = cache_dir / "right" + +"""shell +gdown --id 1jvkbTr_giSP3Ru8OwGNCg6B4PvVbcO34 +gdown --id 1EzBZUb_mh_Dp_FKD0P4XiYYSd0QBH5zW +unzip -oq left.zip -d $cache_dir +unzip -oq right.zip -d $cache_dir +""" + +""" +## Preparing the data + +We are going to use a `tf.data` pipeline to load the data and generate the triplets that we +need to train the Siamese network. + +We'll set up the pipeline using a zipped list with anchor, positive, and negative filenames as +the source. The pipeline will load and preprocess the corresponding images. +""" + + +def preprocess_image(filename): + """ + Load the specified file as a JPEG image, preprocess it and + resize it to the target shape. + """ + + image_string = tf.io.read_file(filename) + image = tf.image.decode_jpeg(image_string, channels=3) + image = tf.image.convert_image_dtype(image, tf.float32) + image = tf.image.resize(image, target_shape) + return image + + +def preprocess_triplets(anchor, positive, negative): + """ + Given the filenames corresponding to the three images, load and + preprocess them. + """ + + return ( + preprocess_image(anchor), + preprocess_image(positive), + preprocess_image(negative), + ) + + +""" +Let's setup our data pipeline using a zipped list with an anchor, positive, +and negative image filename as the source. The output of the pipeline +contains the same triplet with every image loaded and preprocessed. +""" + +# We need to make sure both the anchor and positive images are loaded in +# sorted order so we can match them together. +anchor_images = sorted( + [str(anchor_images_path / f) for f in os.listdir(anchor_images_path)] +) + +positive_images = sorted( + [str(positive_images_path / f) for f in os.listdir(positive_images_path)] +) + +image_count = len(anchor_images) + +anchor_dataset = tf.data.Dataset.from_tensor_slices(anchor_images) +positive_dataset = tf.data.Dataset.from_tensor_slices(positive_images) + +# To generate the list of negative images, let's randomize the list of +# available images and concatenate them together. 
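+# (Because the negatives are drawn from the same anchor and positive file
+# lists, a sampled negative can occasionally coincide with a triplet's own
+# anchor or positive image; the shuffles below make this rare.)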
+rng = np.random.RandomState(seed=42) +rng.shuffle(anchor_images) +rng.shuffle(positive_images) + +negative_images = anchor_images + positive_images +np.random.RandomState(seed=32).shuffle(negative_images) + +negative_dataset = tf.data.Dataset.from_tensor_slices(negative_images) +negative_dataset = negative_dataset.shuffle(buffer_size=4096) + +dataset = tf.data.Dataset.zip((anchor_dataset, positive_dataset, negative_dataset)) +dataset = dataset.shuffle(buffer_size=1024) +dataset = dataset.map(preprocess_triplets) + +# Let's now split our dataset in train and validation. +train_dataset = dataset.take(round(image_count * 0.8)) +val_dataset = dataset.skip(round(image_count * 0.8)) + +train_dataset = train_dataset.batch(args.batch_size, drop_remainder=args.static_shape) +train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE) + +val_dataset = val_dataset.batch(args.batch_size, drop_remainder=args.static_shape) +val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE) + + +""" +## Setting up the embedding generator model + +Our Siamese Network will generate embeddings for each of the images of the +triplet. To do this, we will use a ResNet50 model pretrained on ImageNet and +connect a few `Dense` layers to it so we can learn to separate these +embeddings. + +We will freeze the weights of all the layers of the model up until the layer `conv5_block1_out`. +This is important to avoid affecting the weights that the model has already learned. +We are going to leave the bottom few layers trainable, so that we can fine-tune their weights +during training. +""" + +base_cnn = resnet.ResNet50( + weights="imagenet", input_shape=target_shape + (3,), include_top=False +) + +flatten = layers.Flatten()(base_cnn.output) +dense1 = layers.Dense(512, activation="relu")(flatten) +dense1 = layers.BatchNormalization()(dense1) +dense2 = layers.Dense(256, activation="relu")(dense1) +dense2 = layers.BatchNormalization()(dense2) +output = layers.Dense(256)(dense2) + +embedding = Model(base_cnn.input, output, name="Embedding") + +trainable = False +for layer in base_cnn.layers: + if layer.name == "conv5_block1_out": + trainable = True + layer.trainable = trainable + +""" +## Setting up the Siamese Network model + +The Siamese network will receive each of the triplet images as an input, +generate the embeddings, and output the distance between the anchor and the +positive embedding, as well as the distance between the anchor and the negative +embedding. + +To compute the distance, we can use a custom layer `DistanceLayer` that +returns both values as a tuple. +""" + + +class DistanceLayer(layers.Layer): + """ + This layer is responsible for computing the distance between the anchor + embedding and the positive embedding, and the anchor embedding and the + negative embedding. 
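+
+    (This layer has no trainable weights; it only computes the squared L2
+    distances between the embeddings.)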
+ """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def call(self, anchor, positive, negative): + ap_distance = tf.reduce_sum(tf.square(anchor - positive), -1) + an_distance = tf.reduce_sum(tf.square(anchor - negative), -1) + return (ap_distance, an_distance) + + +anchor_input = layers.Input(name="anchor", shape=target_shape + (3,)) +positive_input = layers.Input(name="positive", shape=target_shape + (3,)) +negative_input = layers.Input(name="negative", shape=target_shape + (3,)) + +distances = DistanceLayer()( + embedding(resnet.preprocess_input(anchor_input)), + embedding(resnet.preprocess_input(positive_input)), + embedding(resnet.preprocess_input(negative_input)), +) + +siamese_network = Model( + inputs=[anchor_input, positive_input, negative_input], outputs=distances +) + +""" +## Putting everything together + +We now need to implement a model with custom training loop so we can compute +the triplet loss using the three embeddings produced by the Siamese network. + +Let's create a `Mean` metric instance to track the loss of the training process. +""" + + +class SiameseModel(Model): + """The Siamese Network model with a custom training and testing loops. + + Computes the triplet loss using the three embeddings produced by the + Siamese Network. + + The triplet loss is defined as: + L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0) + """ + + def __init__(self, siamese_network, margin=0.5): + super(SiameseModel, self).__init__() + self.siamese_network = siamese_network + self.margin = margin + self.loss_tracker = metrics.Mean(name="loss") + + def call(self, inputs): + return self.siamese_network(inputs) + + def train_step(self, data): + # GradientTape is a context manager that records every operation that + # you do inside. We are using it here to compute the loss so we can get + # the gradients and apply them using the optimizer specified in + # `compile()`. + with tf.GradientTape() as tape: + loss = self._compute_loss(data) + + # Storing the gradients of the loss function with respect to the + # weights/parameters. + gradients = tape.gradient(loss, self.siamese_network.trainable_weights) + + # Applying the gradients on the model using the specified optimizer + self.optimizer.apply_gradients( + zip(gradients, self.siamese_network.trainable_weights) + ) + + # Let's update and return the training loss metric. + self.loss_tracker.update_state(loss) + return {"loss": self.loss_tracker.result()} + + def test_step(self, data): + loss = self._compute_loss(data) + + # Let's update and return the loss metric. + self.loss_tracker.update_state(loss) + return {"loss": self.loss_tracker.result()} + + def _compute_loss(self, data): + # The output of the network is a tuple containing the distances + # between the anchor and the positive example, and the anchor and + # the negative example. + ap_distance, an_distance = self.siamese_network(data) + + # Computing the Triplet Loss by subtracting both distances and + # making sure we don't get a negative value. + loss = ap_distance - an_distance + loss = tf.maximum(loss + self.margin, 0.0) + return loss + + @property + def metrics(self): + # We need to list our metrics here so the `reset_states()` can be + # called automatically. + return [self.loss_tracker] + + +""" +## Training + +We are now ready to train our model. 
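+
+Note: the `TimeHistory` callback defined below is an addition in this
+NPU-adapted version of the example; it logs throughput (examples/second)
+every `log_steps` batches so that the accompanying test scripts can parse
+performance from the training log.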
+""" +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +siamese_model = SiameseModel(siamese_network) +siamese_model.compile(optimizer=optimizers.Adam(0.0001)) +siamese_model.fit(train_dataset, epochs=args.epochs, validation_data=val_dataset, callbacks=[TimeHistory(args.batch_size,args.log_steps)], verbose=2) +""" +## Inspecting what the network has learned + +At this point, we can check how the network learned to separate the embeddings +depending on whether they belong to similar images. + +We can use [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) to measure the +similarity between embeddings. + +""" + +""" +## Summary + +1. The `tf.data` API enables you to build efficient input pipelines for your model. It is +particularly useful if you have a large dataset. You can learn more about `tf.data` +pipelines in [tf.data: Build TensorFlow input pipelines](https://www.tensorflow.org/guide/data). + +2. In this example, we use a pre-trained ResNet50 as part of the subnetwork that generates +the feature embeddings. By using [transfer learning](https://www.tensorflow.org/guide/keras/transfer_learning?hl=en), +we can significantly reduce the training time and size of the dataset. + +3. Notice how we are [fine-tuning](https://www.tensorflow.org/guide/keras/transfer_learning?hl=en#fine-tuning) +the weights of the final layers of the ResNet50 network but keeping the rest of the layers untouched. +Using the name assigned to each layer, we can freeze the weights to a certain point and keep the last few layers open. + +4. We can create custom layers by creating a class that inherits from `tf.keras.layers.Layer`, +as we did in the `DistanceLayer` class. + +5. We used a cosine similarity metric to measure how to 2 output embeddings are similar to each other. + +6. You can implement a custom training loop by overriding the `train_step()` method. `train_step()` uses +[`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape), +which records every operation that you perform inside it. 
+gradients passed to the optimizer to update the model weights at every step. For more details, check out the
+[Intro to Keras for researchers](https://keras.io/getting_started/intro_to_keras_for_researchers/)
+and [Writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch?hl=en).
+
+"""
diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..8fcd570aa
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,164 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Base parameters; review and modify per model
+# Batch Size
+batch_size=32
+# Network name, same as the directory name
+Network="siamese_network_ID2539_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for single card
+RankSize=1
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=10
+# Training steps
+#train_steps=5
+# Learning rate
+#learning_rate=1e-4
+#ckpt_path=""
+# Parameter configuration
+data_path=""
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Diagnostic parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_full_1p.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+
+cd $cur_path/
+cp $data_path/model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 /root/.keras/models
+
+############## Run training ##########
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 siamese_network.py \
+    --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --static_shape=True \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+#echo "Final Performance ms/step : $average_perf"
+# Result printing; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+train_accuracy='None'
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss value of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh
new file mode 100644
index 000000000..bb1ecd386
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+# Base parameters; review and modify per model
+# Batch Size
+batch_size=32
+# Network name, same as the directory name
+Network="siamese_network_ID2539_for_TensorFlow2.X"
+# Number of devices; defaults to 1 for single card
+RankSize=1
+RANK_SIZE=1
+# Training epochs (optional)
+train_epochs=2
+# Training steps
+#train_steps=5
+# Learning rate
+#learning_rate=1e-4
+#ckpt_path=""
+# Parameter configuration
+data_path=""
+
+############ Diagnostic parameters ##############
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump # here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Diagnostic parameters ##############
+
+if [[ $1 == --help || $1 == --h ]];then
+    echo "usage: ./train_performance_1p_dynamic.sh"
+    exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+
+cd $cur_path/
+
+############## Run training ##########
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 siamese_network.py \
+    --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --static_shape=False \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+#echo "Final Performance ms/step : $average_perf"
+# Result printing; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+train_accuracy='None'
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
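+# (e.g. with BatchSize=32, RANK_SIZE=1 and FPS=100 images/sec, the formula
+# below yields 32*1*1000/100 = 320 ms per step; the numbers are illustrative)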
"%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'` + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log diff --git a/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..2bb2f4f64 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/siamese_network_ID2539_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,165 @@ +#!/bin/bash +cur_path=`pwd`/../ + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=32 +#网络名称,同目录名称 +Network="siamese_network_ID2539_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RankSize=1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=2 +#训练step +#train_steps=5 +#学习率 +#learning_rate=1e-4 +#ckpt_path="" +#参数配置 +data_path="" + +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="./configs/ops_info.json" +fusion_off_flag=False +fusion_off_file="./configs/fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage: ./train_performance_1p_static_eval.sh" + exit 1 +fi + +for para in $* +do + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + 
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+
+cd $cur_path/
+cp $data_path/model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 /root/.keras/models
+
+############## Run training ##########
+if [ -d $cur_path/test/output ];then
+    rm -rf $cur_path/test/output/*
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+    mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 siamese_network.py \
+    --data_path=$data_path \
+    --epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --static_shape=True \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --auto_tune=${auto_tune} \
+    --profiling_dump_path=${profiling_dump_path} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2e_time=$(( $end - $start ))
+
+echo "Final Training Duration sec : $e2e_time"
+
+#echo "Final Performance ms/step : $average_perf"
+# Result printing; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+single_batch_step_sec=`grep TimeHistory $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $4}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'${single_batch_step_sec}'}'`
+wait
+
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy; review and modify per model
+train_accuracy='None'
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+# Accuracy-monitoring result summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'*'${RANK_SIZE}'*1000/'${FPS}'}'`
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| grep val_loss | awk -F " " '{print $6}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss value of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From cdae830e313fd5294a65237b94da2e3415d02dbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:21:15 +0000 Subject: [PATCH 48/54] =?UTF-8?q?STAMP=5FID2628=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../STAMP_ID2628_for_TensorFlow2.X/LICENSE | 284 ++++++++++++++++++ .../STAMP_ID2628_for_TensorFlow2.X/README.md | 229 ++++++++++++++ .../README_BAK.md | 72 +++++ .../__init__.py | 29 ++ .../evaluate.py | 75 +++++ .../STAMP_ID2628_for_TensorFlow2.X/model.py | 168 +++++++++++ .../modelzoo_level.txt | 3 + .../STAMP_ID2628_for_TensorFlow2.X/modules.py | 114 +++++++ .../requirements.txt | 0 .../STAMP_ID2628_for_TensorFlow2.X/run_1p.sh | 2 + .../test/train_full_1p.sh | 152 ++++++++++ .../test/train_performance_1p.sh | 157 ++++++++++ .../STAMP_ID2628_for_TensorFlow2.X/train.py | 221 ++++++++++++++ .../STAMP_ID2628_for_TensorFlow2.X/utils.py | 145 +++++++++ 14 files changed, 1651 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/__init__.py create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/evaluate.py create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/model.py create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modules.py create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/run_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/train.py create mode 100644 TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/utils.py diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..1b08781e3
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README.md
@@ -0,0 +1,229 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Preparation](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+

Basic Information

+
+**Publisher: Huawei**
+
+**Application Domain: Recommendation**
+
+**Version: 1.1**
+
+**Modified: 2022.04.08**
+
+**Size: 512K**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: Recommendation network training code based on the TensorFlow 2.X framework**
+
+

Overview

+
+## Brief Description
+
+    The full name of the STAMP model is Short-Term Attention/Memory Priority Model for Session-based Recommendation. It is a novel short-term attention/memory priority model that captures a user's general interests from the long-term memory of the session context, while also accounting for the user's current interests from the short-term memory of the last click.
+
+  - Reference paper:
+    skip
+
+
+  - Reference implementation:
+    https://github.com/ZiyaoGeng/Recommender-System-with-TF2.0/tree/master/STAMP
+
+
+  - Implementation adapted to the Ascend AI processor:
+    skip
+
+  - To obtain the code at a given commit_id via Git:
+
+        git clone {repository_url}        # clone the repository
+        cd {repository_name}              # enter the model's code repository directory
+        git checkout {branch}             # switch to the corresponding branch
+        git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+        cd {code_path}                    # enter the model code path; if the repository contains only this model, this step is unnecessary
+
+
+
+
+
+## Default Configuration
+
+
+- Network structure
+  - 16-layer, 4M parameters
+
+
+- Training hyperparameters (single card):
+  - Batch size: 128
+  - maxlen: 40
+  - embed_dim: 100
+  - learning_rate: 0.005
+  - Train epochs: 30
+
+
+## Supported Features
+
+| Feature              | Supported |
+|----------------------|-----------|
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+## Mixed Precision Training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators across the network to float16, improving system performance and reducing memory usage with minimal loss of precision.
+
+## Enabling Mixed Precision
+Pass --precision_mode='allow_mix_precision' to the launch script.
+
+```
+  ./train_performance_1p.sh --help
+
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+```
+
+Related code example:
+
+```
+flags.DEFINE_string(name='precision_mode', default= 'allow_fp32_to_fp16',
+                    help='allow_fp32_to_fp16/force_fp16/ '
+                         'must_keep_origin_dtype/allow_mix_precision.')
+
+npu_device.global_options().precision_mode=FLAGS.precision_mode
+```
+
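+For contrast with the flags-based template above, train.py in this patch applies the same option through npu_device's global options before any Keras call runs. A trimmed, non-authoritative sketch of that setup (the dump and profiling branches are omitted here):
+
+```python
+# Trimmed from train.py in this patch: set the precision mode globally,
+# then initialize the NPU device so subsequent Keras ops run on it.
+import npu_device
+
+def npu_config(precision_mode='allow_mix_precision'):
+    npu_device.global_options().precision_mode = precision_mode
+    npu_device.open().as_default()
+
+npu_config()
+```
+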

Training Environment Preparation

+
+- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+
+

Quick Start

+
+
+## Dataset Preparation
+
+The Diginetica dataset is used for testing and is processed into user sequences. Dataset processing is handled in the utils file and mainly consists of:
+
+  - reading the data (a subset can be used for testing);
+  - filtering out samples whose session length is 1;
+  - filtering out samples containing items that appear fewer than 5 times;
+  - applying a LabelEncoder to the itemId feature, mapping it to the range 0, 1, ...;
+  - sorting by eventdate and sessionId;
+  - splitting the training, validation, and test sets by eventdate;
+  - generating sequences [no negative samples] as new data in the format hist, label; tf.keras.preprocessing.sequence.pad_sequences is therefore used for padding/truncation, and because the sequence carries only the single feature item_id, a dimension is lost after padding/truncation and must be added back (see the padding sketch at the end of this section);
+  - generating an item pool (item pooling), sorted by index;
+  - obtaining feature_columns: no dense data; the sparse data is item_id;
+  - generating the user behavior list to simplify the later extraction of sequence embeddings; here this is item_id;
+  - finally returning feature_columns, behavior_list, (train_X, train_y), (val_X, val_y), (test_X, test_y);
+
+
+## Model Training
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+   1. Before launching training, configure the environment variables required by the program.
+
+      For details on environment variable configuration, see:
+
+      [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+   2. Single-card training
+
+      2.1 Set the single-card training parameters (the script is located at STAMP_ID2628_for_TensorFlow2.X/test/train_full_1p.sh)
+
+           bash train_full_1p.sh --precision_mode='allow_mix_precision'
+
+      2.2 Single-card training command (STAMP_ID2628_for_TensorFlow2.X/test)
+
+      ```
+      Run export ASCEND_DEVICE_ID=0 (0~7) in the terminal to choose the card used for single-card training
+      bash train_full_1p.sh --data_path=xx
+      The dataset consists of csv files; data_path must point at the data directory, e.g. --data_path=/home/data
+      ├─data
+        ├─product-categories.csv
+        ├─products.csv
+        ├─train-clicks.csv
+        ├─train-item-views.csv
+        ├─train-purchases.csv
+        ├─train-queries.csv
+
+      ```
+
+
+
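+The padding step from "Dataset Preparation" can be illustrated in isolation (a minimal sketch mirroring utils.py in this patch; the toy click histories are invented for illustration):
+
+```python
+# Pad/truncate each history to maxlen, then restore the feature axis that
+# is lost because item_id is the only sequence feature (see utils.py).
+import numpy as np
+from tensorflow.keras.preprocessing.sequence import pad_sequences
+
+maxlen = 8
+hists = [[3, 7], [5, 1, 4, 2]]                 # toy click histories
+padded = pad_sequences(hists, maxlen=maxlen)   # shape (2, 8)
+seq_inputs = np.expand_dims(padded, axis=1)    # shape (2, 1, 8)
+print(seq_inputs.shape)
+```
+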

Transfer Learning Guide

+
+
+1. Dataset preparation.
+   See "Dataset Preparation" under "Quick Start".
+
+2. Modify the training scripts.
+
+   _(Modify the model configuration file and model scripts to adapt the model to the customer's actual business data.)_
+
+   1. Modify the configuration file.
+
+   2. Load a pre-trained model. _(Continue training from a preloaded model, or continue training on the user's own dataset.)_
+
+3. Model training.
+
+   _Refer to the training steps under "Model Training". (Depending on the actual situation, check whether the training procedure for the open-source dataset and a custom dataset is the same.)_
+
+4. Model evaluation. (As appropriate.) _Refer to the training steps under "Model Training"._
+

Advanced Reference

+
+## Scripts and Sample Code
+
+## Script Parameters
+
+```
+    --data_path              default='./', help="""directory to data"""
+    --batch_size             default=128, type=int, help="""batch size for 1p"""
+    --epochs                 default=30, type=int, help="""epochs"""
+    --steps_per_epoch        default=50, type=int, help="""steps per epoch"""
+    --learning_rate          default=0.005, type=float, help="""The value of learning_rate"""
+    --precision_mode         default="allow_mix_precision", type=str, help='precision mode, default is allow_mix_precision'
+    --over_dump              dest='over_dump', type=ast.literal_eval, help='if or not over detection, default is False'
+    --data_dump_flag         dest='data_dump_flag', type=ast.literal_eval, help='data dump flag, default is False'
+    --data_dump_step         default="10", help='data dump step, default is 10'
+    --profiling              dest='profiling', type=ast.literal_eval, help='if or not profiling for performance debug, default is False'
+    --profiling_dump_path    default="/home/data", type=str, help='the path to save profiling data'
+    --over_dump_path         default="/home/data", type=str, help='the path to save over dump data'
+    --data_dump_path         default="/home/data", type=str, help='the path to save dump data'
+    --use_mixlist            dest='use_mixlist', type=ast.literal_eval, help='use_mixlist flag, default is False'
+    --fusion_off_flag        dest='fusion_off_flag', type=ast.literal_eval, help='fusion_off flag, default is False'
+    --mixlist_file           default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json'
+    --fusion_off_file        default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg'
+```
+
+## Training Process
+
+Launch single-card or multi-card training with the commands under "Model Training". Single-card and multi-card runs use different scripts; single-card and 8-card network training are supported. The model is stored under ${cur_path}/output/$ASCEND_DEVICE_ID, including the training log and checkpoint files. Taking 8-card training as an example, the loss information is in the file ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..ad5f4ec2d
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,72 @@
+## STAMP
+
+### 1. Paper
+STAMP: Short-Term Attention/Memory Priority Model for Session-based Recommendation
+
+**Innovation**: **combines long-term memory (sequential interest) with short-term memory (current interest)**
+
+Notes on the paper: https://mp.weixin.qq.com/s/TXOSQAkwky1d27PciKjqtQ
+
+
+
+### 2. Model Structure
+
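+The architecture figure from the original write-up is not reproduced here. As a textual substitute, the forward scoring computation can be sketched as follows (a simplified sketch following model.py in this patch, whose names m_s, m_t, h_s, h_t are reused; not the full implementation):
+
+```python
+import tensorflow as tf
+
+def stamp_scores(seq_embed, m_t, item_pooling_embed, attention, ffn1, ffn2):
+    """seq_embed: (None, seq_len, d) session item embeddings; m_t: (None, d)
+    last-click embedding; item_pooling_embed: (m, d) candidate item embeddings."""
+    m_s = tf.reduce_mean(seq_embed, axis=1)   # general interest (None, d)
+    m_a = attention([seq_embed, m_s, m_t])    # attended session memory (None, d)
+    h_s = ffn1(m_a)                           # (None, d)
+    h_t = ffn2(m_t)                           # (None, d)
+    # score every candidate item: (None, m, d) matmul (None, d, 1) -> (None, m)
+    z = tf.matmul(tf.multiply(tf.expand_dims(h_t, axis=1), item_pooling_embed),
+                  tf.expand_dims(h_s, axis=-1))
+    return tf.nn.softmax(tf.squeeze(z, axis=-1))
+```
+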
+
+
+
+### 3. Experimental Dataset
+
+The `Diginetica` dataset is used for testing and is processed into user sequences. Dataset processing is handled in the `utils` file and mainly consists of:
+1. reading the data (a subset can be used for testing);
+2. filtering out samples whose session length is 1;
+3. filtering out samples containing items that appear fewer than 5 times;
+4. applying a `LabelEncoder` to the `itemId` feature, mapping it to the range `0, 1,...`;
+5. sorting by `eventdate, sessionId`;
+6. splitting the training, validation, and test sets by `eventdate`;
+7. generating sequences [no negative samples] as new data in the format `hist, label`; `tf.keras.preprocessing.sequence.pad_sequences` is therefore used for padding/truncation, and **because the sequence carries only the single feature `item_id`, a dimension is lost after padding/truncation and must be added back**;
+8. generating an item pool `item pooling`, sorted by index;
+9. obtaining `feature_columns`: no dense data; the sparse data is `item_id`;
+10. generating the user behavior list to simplify the later extraction of sequence embeddings; here this is `item_id`;
+11. finally returning `feature_columns, behavior_list, (train_X, train_y), (val_X, val_y), (test_X, test_y)`;
+
+
+
+### 4. Model API
+
+```python
+class STAMP(tf.keras.Model):
+    def __init__(self, feature_columns, behavior_feature_list, item_pooling, maxlen=40, activation='tanh', embed_reg=1e-4):
+        """
+        STAMP
+        :param feature_columns: A list. dense_feature_columns + sparse_feature_columns
+        :param behavior_feature_list: A list. the list of behavior feature names
+        :param item_pooling: A Ndarray or Tensor, shape=(m, n),
+                        m is the number of items, and n is the number of behavior feature. The item pooling.
+        :param activation: A String. The activation of FFN.
+        :param maxlen: A scalar. Maximum sequence length.
+        :param embed_reg: A scalar. The regularizer of embedding.
+        """
+```
+
+
+
+### 5. Experimental Hyperparameters
+
+- file: the dataset file (train-item-views.csv);
+- maxlen: maximum sequence length, `40`;
+- embed_dim: embedding dimension, `100`;
+- K: the @K of the evaluation metrics, `20`;
+- learning_rate: learning rate, `0.005`;
+- batch_size: `128`;
+- epoch: `30`;
+
+
+
+### 6. Experimental Results
+
+Using the Diginetica dataset, the final results on the test set are:
+
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/__init__.py b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/__init__.py
new file mode 100644
index 000000000..9772d6bd7
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/__init__.py
@@ -0,0 +1,29 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/evaluate.py b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/evaluate.py new file mode 100644 index 000000000..db3769ff9 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/evaluate.py @@ -0,0 +1,75 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +''' +Descripttion: Evaluate +Author: Ziyao Geng +Date: 2020-10-25 10:07:17 +LastEditors: ZiyaoGeng +LastEditTime: 2020-10-26 12:47:28 +''' + +import numpy as np + + +def getHit(pred_y, true_y): + """ + calculate hit rate + :return: + """ + # reversed + pred_index = np.argsort(-pred_y)[:, :_K] + return sum([true_y[i] in pred_index[i] for i in range(len(pred_index))]) / len(pred_index) + + +def getMRR(pred_y, true_y): + """ + """ + pred_index = np.argsort(-pred_y)[:, :_K] + return sum([1 / (np.where(true_y[i] == pred_index[i])[0][0] + 1) \ + for i in range(len(pred_index)) if len(np.where(true_y[i] == pred_index[i])[0]) != 0]) / len(pred_index) + + +def evaluate_model(model, test, K): + """ + evaluate model + :param model: model + :param test: test set + :param K: top K + :return: hit rate, mrr + """ + global _K + _K = K + test_X, test_y = test + pred_y = model.predict(test_X) + hit_rate = getHit(pred_y, test_y) + mrr = getMRR(pred_y, test_y) + + + return hit_rate, mrr \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/model.py b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/model.py new file mode 100644 index 000000000..d2e17d7cc --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/model.py @@ -0,0 +1,168 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Created on Oct 23, 2020 + +model: STAMP: Short-Term Attention/Memory Priority Model for Session-based Recommendation + +@author: Ziyao Geng +""" +import tensorflow as tf +from tensorflow.keras.regularizers import l2 +from tensorflow.keras.layers import Layer, Dense, LayerNormalization, \ + Dropout, Embedding, Flatten, Input + +from modules import * + + +class STAMP(tf.keras.Model): + def __init__(self, feature_columns, behavior_feature_list, item_pooling, maxlen=40, activation='tanh', embed_reg=1e-4): + """ + STAMP + :param feature_columns: A list. dense_feature_columns + sparse_feature_columns + :param behavior_feature_list: A list. the list of behavior feature names + :param item_pooling: A Ndarray or Tensor, shape=(m, n), + m is the number of items, and n is the number of behavior feature. The item pooling. + :param activation: A String. The activation of FFN. + :param maxlen: A scalar. Maximum sequence length. + :param embed_reg: A scalar. The regularizer of embedding. 
+        """
+        super(STAMP, self).__init__()
+        # maximum sequence length
+        self.maxlen = maxlen
+
+        # item pooling
+        self.item_pooling = item_pooling
+        self.dense_feature_columns, self.sparse_feature_columns = feature_columns
+
+        # lengths of the non-behavior sparse features and the dense features
+        self.other_sparse_len = len(self.sparse_feature_columns) - len(behavior_feature_list)
+        self.dense_len = len(self.dense_feature_columns)
+        # if behavior feature list contains itemId and item category id, seq_len = 2
+        self.seq_len = len(behavior_feature_list)
+
+        # embedding dim, each sparse feature embedding dimension is the same
+        self.embed_dim = self.sparse_feature_columns[0]['embed_dim']
+
+        # other embedding layers
+        self.embed_sparse_layers = [Embedding(input_dim=feat['feat_num'],
+                                              input_length=1,
+                                              output_dim=feat['embed_dim'],
+                                              embeddings_initializer='random_uniform',
+                                              embeddings_regularizer=l2(embed_reg))
+                                    for feat in self.sparse_feature_columns
+                                    if feat['feat'] not in behavior_feature_list]
+        # behavior embedding layers
+        self.embed_seq_layers = [Embedding(input_dim=feat['feat_num'],
+                                           input_length=1,
+                                           output_dim=feat['embed_dim'],
+                                           embeddings_initializer='random_uniform',
+                                           embeddings_regularizer=l2(embed_reg))
+                                 for feat in self.sparse_feature_columns
+                                 if feat['feat'] in behavior_feature_list]
+
+        # Attention
+        self.attention_layer = Attention_Layer(d=self.embed_dim)
+
+        # FFN, hidden units must equal the embedding dimension
+        self.ffn1 = Dense(self.embed_dim, activation=activation)
+        self.ffn2 = Dense(self.embed_dim, activation=activation)
+
+    def call(self, inputs):
+        # dense_inputs and sparse_inputs may be empty placeholders
+        dense_inputs, sparse_inputs, seq_inputs = inputs
+
+        x = dense_inputs
+        # other sparse features
+        for i in range(self.other_sparse_len):
+            x = tf.concat([x, self.embed_sparse_layers[i](sparse_inputs[:, i])], axis=-1)
+
+        # seq
+        seq_embed, m_t, item_pooling_embed = None, None, None
+        for i in range(self.seq_len):
+            # item sequence embedding
+            seq_embed = self.embed_seq_layers[i](seq_inputs[:, i]) if seq_embed is None \
+                else seq_embed + self.embed_seq_layers[i](seq_inputs[:, i])
+            # last click item embedding; use the full batch here, matching the first branch
+            m_t = self.embed_seq_layers[i](seq_inputs[:, i, -1]) if m_t is None \
+                else m_t + self.embed_seq_layers[i](seq_inputs[:, i, -1])  # (None, d)
+            # item pooling embedding
+            item_pooling_embed = self.embed_seq_layers[i](self.item_pooling[:, i]) \
+                if item_pooling_embed is None \
+                else item_pooling_embed + self.embed_seq_layers[i](self.item_pooling[:, i])  # (m, d)
+
+        # calculate m_s
+        m_s = tf.reduce_mean(seq_embed, axis=1)  # (None, d)
+
+        # attention
+        m_a = self.attention_layer([seq_embed, m_s, m_t])  # (None, d)
+        # if the model is STMP, m_a = m_s
+        # m_a = m_s
+
+        # try to add other embedding vector
+        if self.other_sparse_len != 0 or self.dense_len != 0:
+            m_a = tf.concat([m_a, x], axis=-1)
+            m_t = tf.concat([m_t, x], axis=-1)
+
+        # FFN
+        h_s = self.ffn1(m_a)  # (None, d)
+        h_t = self.ffn2(m_t)  # (None, d)
+
+        # Calculate
+        # h_t * item_pooling_embed, (None, 1, d) * (m, d) = (None, m, d)
+        # then matmul with h_s, (None, m, d) matmul (None, d, 1) = (None, m, 1)
+        z = tf.matmul(tf.multiply(tf.expand_dims(h_t, axis=1), item_pooling_embed), tf.expand_dims(h_s, axis=-1))
+        z = tf.squeeze(z, axis=-1)  # (None, m)
+
+        # Outputs
+        outputs = tf.nn.softmax(z)
+        return outputs
+
+    def summary(self):
+        dense_inputs = Input(shape=(self.dense_len,), dtype=tf.float32)
+        sparse_inputs = Input(shape=(self.other_sparse_len,), dtype=tf.int32)
+        seq_inputs = Input(shape=(self.seq_len, self.maxlen), dtype=tf.int32)
+        tf.keras.Model(inputs=[dense_inputs, sparse_inputs, 
seq_inputs], + outputs=self.call([dense_inputs, sparse_inputs, seq_inputs])).summary() + + +def test_model(): + dense_features = [] # [{'feat': 'a'}, {'feat': 'b'}] + sparse_features = [{'feat': 'item_id', 'feat_num': 100, 'embed_dim': 8}, + {'feat': 'cate_id', 'feat_num': 100, 'embed_dim': 8}, + {'feat': 'adv_id', 'feat_num': 100, 'embed_dim': 8}] + behavior_list = ['item_id', 'cate_id'] + item_pooling = tf.constant([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]) + features = [dense_features, sparse_features] + model = STAMP(features, behavior_list, item_pooling) + model.summary() + + +# test_model() \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modules.py b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modules.py new file mode 100644 index 000000000..aa9efc6b5 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/modules.py @@ -0,0 +1,114 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +''' +Descripttion: +Author: Ziyao Geng +Date: 2020-10-23 11:10:08 +LastEditors: ZiyaoGeng +LastEditTime: 2020-10-26 09:57:35 +''' +""" +Created on Oct 23, 2020 + +modules of STAMP: attention mechanism + +@author: Ziyao Geng +""" +import tensorflow as tf + +from tensorflow.keras.regularizers import l2 +from tensorflow.keras.losses import Loss +from tensorflow.keras.layers import Layer + + +class Attention_Layer(Layer): + """ + Attention Layer + """ + def __init__(self, d, reg=1e-4): + """ + + :param d: A scalar. The dimension of embedding. + :param reg: A scalar. 
The regularizer of parameters
+        """
+        self.d = d
+        self.reg = reg
+        super(Attention_Layer, self).__init__()
+
+    def build(self, input_shape):
+        self.W0 = self.add_weight(name='W0',
+                                  shape=(self.d, 1),
+                                  initializer=tf.random_normal_initializer,
+                                  regularizer=l2(self.reg),
+                                  trainable=True)
+        self.W1 = self.add_weight(name='W1',
+                                  shape=(self.d, self.d),
+                                  initializer=tf.random_normal_initializer,
+                                  regularizer=l2(self.reg),
+                                  trainable=True)
+        self.W2 = self.add_weight(name='W2',
+                                  shape=(self.d, self.d),
+                                  initializer=tf.random_normal_initializer,
+                                  regularizer=l2(self.reg),
+                                  trainable=True)
+        self.W3 = self.add_weight(name='W3',
+                                  shape=(self.d, self.d),
+                                  initializer=tf.random_normal_initializer,
+                                  regularizer=l2(self.reg),
+                                  trainable=True)
+        self.b = self.add_weight(name='b',
+                                 shape=(self.d,),
+                                 initializer=tf.random_normal_initializer,
+                                 regularizer=l2(self.reg),
+                                 trainable=True)
+
+    def call(self, inputs):
+        """
+        seq_embed: (None, seq_len, d)
+        W1: (d, d)
+        x_t: (None, d)
+        W2: (d, d)
+        m_s: (None, d)
+        W3: (d, d)
+        W0: (d, 1)
+        """
+        seq_embed, m_s, x_t = inputs
+        alpha = tf.matmul(tf.nn.sigmoid(
+            tf.tensordot(seq_embed, self.W1, axes=[2, 0]) + tf.expand_dims(tf.matmul(x_t, self.W2), axis=1) +
+            tf.expand_dims(tf.matmul(m_s, self.W3), axis=1) + self.b), self.W0)
+        m_a = tf.reduce_sum(tf.multiply(alpha, seq_embed), axis=1)  # (None, d)
+        return m_a
+
+
+# class CrossEntropy(Loss):
+#     def call(self, y_true, y_pred):
+#         y_true = tf.one_hot(tf.squeeze(tf.cast(y_true, dtype=tf.int32), axis=-1), depth=y_pred.shape[-1])
+#         return - (tf.math.log(tf.reduce_sum(y_pred * y_true)) + tf.reduce_sum(tf.math.log(1.0 - y_pred)) - \
+#                   tf.math.log(1 - tf.reduce_sum(y_pred * y_true)))
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/run_1p.sh b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/run_1p.sh
new file mode 100644
index 000000000..d549010ea
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/run_1p.sh
@@ -0,0 +1,2 @@
+cur_path=`pwd`
+python3 ${cur_path}/train.py > loss+perf_gpu.txt 2>&1
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..86ceb1fd1
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#Basic parameters; review and adjust per model
+#Batch Size
+batch_size=128
+#Network name, same as the directory name
+Network="STAMP_ID2628_for_TensorFlow2.X"
+#Number of devices; defaults to 1 for single card
+RankSize=1
+#Training epochs, optional
+train_epochs=30
+#Training steps
+steps_per_epoch=50
+#Learning rate
+learning_rate=0.005
+#ckpt_path=""
+#Parameter configuration
+data_path=""
+
+############Debug/monitoring parameters##############
+precision_mode="allow_mix_precision"
+#Maintenance parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump #here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="ops_info.json"
+fusion_off_flag=False
+fusion_off_file="fusion_switch.cfg"
+############Debug/monitoring parameters##############
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage:./train_full_1p.sh 
$data_path" + exit 1 +fi + +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --ckpt_path* ]];then + ckpt_path=`echo ${para#*=}` + fi +done + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +##############执行训练########## +cd $cur_path/ +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/$ASCEND_DEVICE_ID + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + +start=$(date +%s) +nohup python3 train.py --data_path=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --steps_per_epoch=$steps_per_epoch \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --learning_rate=$learning_rate >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait +end=$(date +%s) +e2etime=$(( $end - $start )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +TrainingTime=`grep ${steps_per_epoch}/${steps_per_epoch} $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END{for(i=1;i<=NF;i++) if($i ~ /s/) print $i}' | tr -cd "[0-9]"` +wait +FPS=`awk 'BEGIN{printf "%.2f\n",'${steps_per_epoch}'*'${batch_size}'/'${TrainingTime}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy='null' + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep -Eo " loss: [0-9]*\.[0-9]*" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F " " '{print $2}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..9e7be8b1e
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+cur_path=`pwd`/../
+
+#Basic parameters; review and adjust per model
+#Batch Size
+batch_size=128
+#Network name, same as the directory name
+Network="STAMP_ID2628_for_TensorFlow2.X"
+#Number of devices; defaults to 1 for single card
+RankSize=1
+#Training epochs, optional
+train_epochs=8
+#Training steps
+steps_per_epoch=50
+#Learning rate
+learning_rate=0.005
+#ckpt_path=""
+#Parameter configuration
+data_path=""
+
+############Debug/monitoring parameters##############
+precision_mode="allow_mix_precision"
+#Maintenance parameters; no modification needed below
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump #here cur_path is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=False
+mixlist_file="ops_info.json"
+fusion_off_flag=False
+fusion_off_file="fusion_switch.cfg"
+############Debug/monitoring parameters##############
+
+if [[ $1 == --help || $1 == --h ]];then
+   echo "usage:./train_performance_1p.sh $data_path"
+   exit 1
+fi
+
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/test/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --ckpt_path* ]];then
+        ckpt_path=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+   echo "[Error] para \"data_path\" must be configured"
+   exit 1
+fi
+
+##############Run training##########
+cd $cur_path/
+if [ -d $cur_path/test/output ];then
+   rm -rf $cur_path/test/output/$ASCEND_DEVICE_ID
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+else
+   mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+fi
+wait
+
+start=$(date +%s)
+nohup python3 train.py --data_path=$data_path \
+    
--epochs=$train_epochs \
+    --batch_size=$batch_size \
+    --steps_per_epoch=$steps_per_epoch \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --learning_rate=$learning_rate >$cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+wait
+end=$(date +%s)
+e2etime=$(( $end - $start ))
+
+#Result printing; no modification needed
+echo "------------------ Final result ------------------"
+#Output performance FPS; review and adjust per model
+TrainingTime=`grep ${steps_per_epoch}/${steps_per_epoch} $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END{for(i=1;i<=NF;i++) if($i ~ /s/) print $i}' | tr -cd "[0-9]"`
+wait
+FPS=`awk 'BEGIN{printf "%.2f\n",'${steps_per_epoch}'*'${batch_size}'/'${TrainingTime}'}'`
+#Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+
+#Output training accuracy; review and adjust per model
+#train_accuracy=`grep " val_accuracy" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $NF}'`
+train_accuracy='null'
+#Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#Accuracy monitoring summary
+#Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+##Collect performance data; no modification needed
+#Throughput
+ActualFPS=${FPS}
+#Training time per iteration
+TrainingTime=${TrainingTime}
+
+#Extract Loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+#grep " loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $8}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+grep -Eo " loss: [0-9]*\.[0-9]*" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F " " '{print $2}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#Loss value of the last iteration; no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/train.py b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/train.py
new file mode 100644
index 000000000..3f77e0691
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/train.py
@@ -0,0 +1,221 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+Description: train STAMP model
+Author: Ziyao Geng
+Date: 2020-10-25 09:27:23
+LastEditors: ZiyaoGeng
+LastEditTime: 2020-10-27 10:39:34
+'''
+import npu_device
+import ast
+
+from time import time
+import tensorflow as tf
+from tensorflow.keras.losses import binary_crossentropy
+from tensorflow.keras.optimizers import Adam
+import os
+
+from model import STAMP
+from modules import *
+from evaluate import *
+from utils import *
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+import argparse
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--data_path', default='./',
+                        help="""directory to data""")
+    parser.add_argument('--batch_size', default=128, type=int,
+                        help="""batch size for 1p""")
+    parser.add_argument('--epochs', default=30, type=int,
+                        help="""epochs""")
+    parser.add_argument('--steps_per_epoch', default=50, type=int,
+                        help="""steps per epoch""")
+    parser.add_argument('--learning_rate', default=0.005, type=float,
+                        help="""The value of learning_rate""")
+    parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                        help='precision mode, default is allow_mix_precision')
+    parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval,
+                        help='if or not over detection, default is False')
+    parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval,
+                        help='data dump flag, default is False')
+    parser.add_argument('--data_dump_step', default="10",
+                        help='data dump step, default is 10')
+    parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval,
+                        help='if or not profiling for performance debug, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+    parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+    parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+    parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval,
+                        help='use_mixlist flag, default is False')
+    parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval,
+                        help='fusion_off flag, default is False')
+    
parser.add_argument('--mixlist_file', default="ops_info.json", type=str, + help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, + help='fusion_off file name, default is fusion_switch.cfg') + args = parser.parse_args() + def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode == 'allow_mix_precision': + npu_device.global_options().modify_mixlist = "../configs/" + args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file = "../configs/" + args.fusion_off_file + npu_device.open().as_default() + print("Npu init") + npu_config() + # args, unknown_args = parser.parse_known_args() + return args + + +args = parse_args() +data_dir = args.data_path + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + #self.opt = optimizer + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time() + self.epoch_start = time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d'%(elapsed_time, examples_per_second, self.last_log_step, + self.global_steps),flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + +if __name__ == '__main__': + # ========================= Hyper Parameters ======================= + # file = '../dataset/Diginetica/train-item-views.csv' + dirname = "train-item-views.csv" + file = os.path.join(data_dir, dirname) + maxlen = 8 + + embed_dim = 100 + K = 20 + + learning_rate = 
args.learning_rate + batch_size = args.batch_size + epochs = args.epochs + steps_per_epoch = args.steps_per_epoch + # ========================== Create dataset ======================= + feature_columns, behavior_list, item_pooling, train, val, test = create_diginetica_dataset(file, embed_dim, maxlen) + train_X, train_y = train + val_X, val_y = val + # ============================Build Model========================== + model = STAMP(feature_columns, behavior_list, item_pooling, maxlen) + model.summary() + # ============================model checkpoint====================== + # check_path = 'save/sas_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt' + # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True, + # verbose=1, period=5) + # =========================Compile============================ + # CrossEntropy() + # tf.losses.SparseCategoricalCrossentropy() + model.compile(loss=tf.losses.SparseCategoricalCrossentropy(), optimizer=Adam(learning_rate=learning_rate)) + + for epoch in range(epochs): + # ===========================Fit============================== + t1 = time() + model.fit( + train_X, + train_y, + validation_data=(val_X, val_y), + epochs=1, + callbacks=[TimeHistory(128,50)], + # callbacks=[tensorboard, checkpoint], + batch_size=batch_size, + verbose=2, + steps_per_epoch=steps_per_epoch, + ) + # model.save_weights(filepath="STAMP", save_format="tf") + # ===========================Test============================== + # t2 = time() + # hit_rate, mrr = evaluate_model(model, test, K) + # print('Iteration %d Fit [%.1f s], Evaluate [%.1f s]: HR = %.4f, MRR = %.4f, ' + # % (epoch, t2 - t1, time() - t2, hit_rate, mrr)) diff --git a/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/utils.py b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/utils.py new file mode 100644 index 000000000..5e4e436c1 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/STAMP_ID2628_for_TensorFlow2.X/utils.py @@ -0,0 +1,145 @@ +# +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +''' +Descripttion: create Diginetica dataset +Author: Ziyao Geng +Date: 2020-10-23 19:52:53 +LastEditors: ZiyaoGeng +LastEditTime: 2020-10-27 10:00:03 +''' +import pandas as pd +import numpy as np +import random +from tqdm import tqdm +from sklearn.preprocessing import LabelEncoder +from tensorflow.keras.preprocessing.sequence import pad_sequences + + +def sparseFeature(feat, feat_num, embed_dim=4): + """ + create dictionary for sparse feature + :param feat: feature name + :param feat_num: the total number of sparse features that do not repeat + :param embed_dim: embedding dimension + :return: + """ + return {'feat': feat, 'feat_num': feat_num, 'embed_dim': embed_dim} + + +def denseFeature(feat): + """ + create dictionary for dense feature + :param feat: dense feature name + :return: + """ + return {'feat': feat} + + +def convert_sequence(data_df): + """ + :param data_df: train, val or test + """ + data_sequence = [] + for sessionId, df in tqdm(data_df[['sessionId', 'itemId']].groupby(['sessionId'])): + item_list = df['itemId'].tolist() + + for i in range(1, len(item_list)): + hist_i = item_list[:i] + # hist_item, next_click_item(label) + data_sequence.append([hist_i, item_list[i]]) + + return data_sequence + +def create_diginetica_dataset(file, embed_dim=8, maxlen=40): + """ + :param file: A string. dataset path + :param embed_dim: A scalar. latent factor + :param maxlen: A scalar. + :return: feature_columns, behavior_list, train, val, test + """ + print('==========Data Preprocess Start============') + # load dataset + data_df = pd.read_csv(file, sep=";") # (1235380, 5) + + # filter out sessions of length of 1 + data_df['session_count'] = data_df.groupby('sessionId')['sessionId'].transform('count') + data_df = data_df[data_df.session_count > 1] # (1144686, 6) + + # filter out items that appear less than 5 times + data_df['item_count'] = data_df.groupby('itemId')['itemId'].transform('count') + data_df = data_df[data_df.item_count >= 5] # (1004834, 7) + + # label encoder itemId, {0, 1, ..., } + le = LabelEncoder() + data_df['itemId'] = le.fit_transform(data_df['itemId']) + + # sorted by eventdate, sessionId + data_df = data_df.sort_values(by=['eventdate', 'sessionId']) + + # split dataset, 1 day for valdation, 7 days for test + train = data_df[data_df.eventdate < '2016-05-25'] # (916485, 7) + val = data_df[data_df.eventdate == '2016-05-25'] # (10400, 7) + test = data_df[data_df.eventdate > '2016-05-25'] # (77949, 7) + + # convert sequence + train = pd.DataFrame(convert_sequence(train), columns=['hist', 'label']) + val = pd.DataFrame(convert_sequence(val), columns=['hist', 'label']) + test = pd.DataFrame(convert_sequence(test), columns=['hist', 'label']) + + # Padding + # not have dense inputs and other sparse inputs + print('==================Padding===================') + train_X = [np.array([0.] 
* len(train)), np.array([0] * len(train)), + np.expand_dims(pad_sequences(train['hist'], maxlen=maxlen), axis=1)] + train_y = train['label'].values + val_X = [np.array([0] * len(val)), np.array([0] * len(val)), + np.expand_dims(pad_sequences(val['hist'], maxlen=maxlen), axis=1)] + val_y = val['label'].values + test_X = [np.array([0] * len(test)), np.array([0] * len(test)), + np.expand_dims(pad_sequences(test['hist'], maxlen=maxlen), axis=1)] + test_y = test['label'].values + + # item pooling + item_pooling = np.sort(data_df['itemId'].unique().reshape(-1, 1), axis=0) + + # feature columns, dense feature columns + sparse feature columns + item_num = data_df['itemId'].max() + 1 + feature_columns = [[], + [sparseFeature('item_id', item_num, embed_dim)]] + + # behavior list + behavior_list = ['item_id'] + + print('===========Data Preprocess End=============') + + return feature_columns, behavior_list, item_pooling, (train_X, train_y), (val_X, val_y), (test_X, test_y) + + +# create_diginetica_dataset('../dataset/Diginetica/train-item-views.csv') \ No newline at end of file -- Gitee From cb847b05a4411c90b4fe6a80d5653dccf7a25633 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:21:37 +0000 Subject: [PATCH 49/54] =?UTF-8?q?super=5Fresolution=5Fsub=5Fpixel=5FID2541?= =?UTF-8?q?=5Ffor=5FTensorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 21 + .../README.md | 208 +++++++ .../README_BAK.md | 91 ++++ .../image_dataset.py | 254 +++++++++ .../modelzoo_level.txt | 3 + .../requirements.txt | 0 .../super_resolution_sub_pixel.py | 508 ++++++++++++++++++ .../test/train_full_1p.sh | 186 +++++++ .../test/train_performance_1p.sh | 187 +++++++ .../test/train_performance_1p_static.sh | 187 +++++++ 10 files changed, 1645 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README_BAK.md create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/image_dataset.py create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/super_resolution_sub_pixel.py create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p_static.sh diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..51d555a15 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/LICENSE @@ -0,0 
+1,21 @@
+MIT License
+
+Copyright (c) 2018 Ke YU
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..f95018552
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README.md
@@ -0,0 +1,208 @@
+- [Basic Information](#basic-information)
+- [Overview](#overview)
+- [Training Environment Preparation](#training-environment-preparation)
+- [Quick Start](#quick-start)
+- [Transfer Learning Guide](#transfer-learning-guide)
+- [Advanced Reference](#advanced-reference)
+
+

+## Basic Information

+
+**Publisher: Huawei**
+
+**Application Domain: Super Resolution**
+
+**Version: 1.1**
+
+**Modified: 2022.04.21**
+
+**Size: 712K**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: training code for an image super-resolution network based on the TensorFlow 2.X framework**
+
+

+## Overview

+
+## Summary
+
+    ESPCN (Efficient Sub-Pixel CNN), proposed by Shi in 2016, is a model that reconstructs a high-resolution version of an image from a given low-resolution version. It leverages efficient "sub-pixel convolution" layers that learn an array of image upscaling filters.
+
+
+  - Reference paper:
+
+    https://arxiv.org/abs/1609.05158
+
+  - Reference implementation:
+    https://github.com/keras-team/keras-io/blob/master/examples/vision/super_resolution_sub_pixel.py
+
+
+  - Implementation adapted to the Ascend AI Processor:
+    skip
+
+  - To obtain the code at the corresponding commit_id via Git:
+    ```
+    git clone {repository_url}    # clone the repository
+    cd {repository_name}          # enter the model's repository directory
+    git checkout {branch}         # switch to the corresponding branch
+    git reset --hard {commit_id}  # reset the code to the corresponding commit_id
+    cd {code_path}                # enter the model's code path; not needed if the repo contains only this model
+    ```
+
+
+
+
+## Default configuration
+
+
+- Network structure
+  - Interpolation: a simple cubic-spline interpolation performs the initial upsampling, after which the non-linear mapping is learned
+  - Deconvolution: the final upsampling layer is a learned deconvolution layer. Deconvolution is essentially a special form of convolution, however, and in theory filters have to be stacked afterwards to obtain a larger performance gain.
+  - Sub-pixel convolution layer: compared with a regular convolution layer, its output has r^2 feature channels, where r is the upscale factor (see the `depth_to_space` sketch at the end of this section)
+
+- Training hyperparameters (single card):
+  - Batch size: 8
+  - crop_size: 300
+  - upscale_factor: 3
+  - Train epochs: 100
+
+
+## Supported features
+
+| Feature | Supported |
+|-------|------|
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+## Mixed-precision training
+
+The Ascend 910 AI Processor provides automatic mixed precision: following its built-in optimization strategy, it automatically lowers part of the float32 operators in the network to float16, which improves system performance and reduces memory usage with very little loss of precision.
+
+## Enabling mixed precision
+Related code example (TF1-style session configuration; the TF2 scripts in this repository set the same mode through `npu_device.global_options().precision_mode`, see `super_resolution_sub_pixel.py`).
+
+```
+    config_proto = tf.ConfigProto(allow_soft_placement=True)
+    custom_op = config_proto.graph_options.rewrite_options.custom_optimizers.add()
+    custom_op.name = 'NpuOptimizer'
+    custom_op.parameter_map["use_off_line"].b = True
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+    config_proto.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+    session_config = npu_config_proto(config_proto=config_proto)
+```
+
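+
+The sub-pixel layer described above is what `super_resolution_sub_pixel.py` realizes with `tf.nn.depth_to_space`: a convolution produces r^2 channels per output channel, and `depth_to_space` rearranges them into an r-times larger image. A minimal shape sketch (the tensor values here are illustrative only):
+
+```
+import tensorflow as tf
+
+r = 3                                        # upscale_factor
+x = tf.random.normal([1, 100, 100, r * r])   # conv output: C * r^2 channels, C = 1
+y = tf.nn.depth_to_space(x, r)               # pixel shuffle
+print(y.shape)                               # (1, 300, 300, 1)
+```
+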

+## Training Environment Preparation

+
+- For hardware environment and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+
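+
+Optionally, a quick smoke test before launching the shell scripts confirms that the NPU software stack is importable (a minimal sketch; `npu_device` is provided by the CANN installation):
+
+```
+import npu_device                 # fails here if the CANN stack is missing
+import tensorflow as tf
+
+print(tf.__version__)             # the scripts target TensorFlow 2.6.2
+npu_device.open().as_default()    # as done at the top of super_resolution_sub_pixel.py
+```
+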

+## Quick Start

+
+## Dataset preparation
+
+1. Training uses the BSDS500 dataset, which contains 200 training, 100 validation and 200 test images. All ground truths are stored in .mat files containing segmentation and boundaries; each image has five ground truths, one per annotator. During training the ground truths can be averaged or used to augment the data, and the evaluation code compares against all five in turn.
+2. Create the training and validation datasets with image_dataset_from_directory.
+3. Rescale the images to obtain values in the [0, 1] range.
+4. Crop and resize the images, and convert them from the RGB color space to the YUV color space.
+
+## Model training
+- Click "Download now" and choose a suitable way to download the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables required by the program.
+
+       For the environment variable settings, see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+
+    2. Single-card training
+
+       2.1 Set the single-card training parameters (the script is located at super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_full_1p.sh); for example:
+
+
+            ```
+            #batch size
+            batch_size=8
+            #training epochs
+            train_epochs=100
+            ```
+
+
+
+       2.2 Single-card training command (super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test)
+
+       ```
+       Run export ASCEND_DEVICE_ID=0 (0-7) in the terminal to select the card used for single-card training
+       bash train_full_1p.sh --data_path=xx
+       The dataset should be of h5 type; data_path must point at the "datasets" level, e.g. --data_path=/home/datasets
+       ├─datasets
+          ├─BSR
+            ├─bench
+            ├─BSDS500
+            ├─documentation
+
+       ```
+
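+
+As a sanity check for the directory tree above: the training script derives the directory it actually reads from `--data_path` itself, appending `datasets/BSR`, so those levels must exist under the path you pass. A sketch of the resolution logic (mirroring the `data_dir` handling in `super_resolution_sub_pixel.py`; the example path is a placeholder):
+
+```
+import os
+
+data_path = "/home/datasets"                         # value passed as --data_path
+data_dir = data_path + "/datasets/BSR"               # appended by the script
+root_dir = os.path.join(data_dir, "BSDS500/data")    # images are loaded from here
+print(root_dir)   # /home/datasets/datasets/BSR/BSDS500/data
+```
+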

+## Transfer Learning Guide

+
+- Dataset preparation.
+
+  1. Obtain the data.
+     See "Dataset preparation" under "Quick Start".
+
+- Model training
+
+  See the "Quick Start" section.
+

+## Advanced Reference

+
+## Scripts and sample code
+
+    ├── README.md                               //documentation
+    ├── requirements.txt                        //dependencies
+    ├── modelzoo_level.txt                      //status file
+    ├── super_resolution_sub_pixel.py           //network definition and training script
+    ├── test
+    |    |—— train_full_1p.sh                   //single-card full-training script
+    |    |—— train_performance_1p.sh            //single-card performance script
+
+
+## Script parameters
+
+```
+batch_size           training batch size
+epochs               number of training epochs
+precision_mode       default="allow_mix_precision", type=str, help='precision mode'
+over_dump            type=ast.literal_eval, help='whether to enable overflow detection, default is False'
+data_dump_flag       type=ast.literal_eval, help='data dump flag, default is False'
+data_dump_step       data dump step, default is 10
+profiling            type=ast.literal_eval, help='whether to profile for performance debugging, default is False'
+profiling_dump_path  type=str, help='the path to save profiling data'
+over_dump_path       type=str, help='the path to save over dump data'
+data_dump_path       type=str, help='the path to save dump data'
+use_mixlist          type=ast.literal_eval, help='use_mixlist flag, default is False'
+fusion_off_flag      type=ast.literal_eval, help='fusion_off flag, default is False'
+mixlist_file         type=str, help='mixlist file name, default is ops_info.json'
+fusion_off_file      type=str, help='fusion_off file name, default is fusion_switch.cfg'
+auto_tune            help='auto_tune flag, default is False'
+```
+
+## Training process
+
+Start single-card training with the command given under "Model training".
+Set data_path in the training script (train_full_1p.sh) to the path of the training dataset. For the detailed procedure, see the "Model training" example.
diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README_BAK.md b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README_BAK.md
new file mode 100644
index 000000000..2b7b82c43
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/README_BAK.md
@@ -0,0 +1,91 @@
+## RL-Restore [[project page](http://mmlab.ie.cuhk.edu.hk/projects/RL-Restore/)][[paper](https://arxiv.org/abs/1804.03312)]
+
+:triangular_flag_on_post: Support arbitrary input size. Aug 25
+:triangular_flag_on_post: Add Python3 compatibility. Aug 6
+:triangular_flag_on_post: Training code is ready! Jun 15
+
+### Overview
+
+- Framework
+
+  *(framework figure omitted)*
+
+- Synthetic & real-world results
+![](imgs/restore.gif)
+
+### Prerequisite
+
+- [Anaconda](https://www.anaconda.com/download/) is highly recommended as you can easily adjust the environment setting.
+  ```
+  pip install opencv-python scipy tqdm h5py
+  ```
+
+- We have tested our code under the following settings:
+ + | Python | TensorFlow | CUDA | cuDNN | + | :----: | :--------: | :--: | :---: | + | 2.7 | 1.3 | 8.0 | 5.1 | + | 3.5 | 1.4 | 8.0 | 5.1 | + | 3.6 | 1.10 | 9.0 | 7.0 | + +### Test +- Start testing on synthetic dataset + ``` + python main.py --dataset moderate + ``` + > `dataset`: choose a test set among `mild`, `moderate` and `severe` + +- :heavy_exclamation_mark: Start testing on real-world data (support arbitrary input size) + ``` + python main.py --dataset mine + ``` + + - You may put your own test images in `data/test/mine/` + +- Dataset + + - All test sets can be downloaded at [Google Drive](https://drive.google.com/open?id=19z2s1e3zT8_1J9ZtsCOrzUSsrQahuINo) or [Baidu Cloud](https://pan.baidu.com/s/1RXTcfI-mne5YZh3myQcjzQ). + + - Replace `test_images/` with the downloaded data and play with the whole dataset. + +- Naming rules + + - Each saved image name refers to a selected toolchain. Please refer to my second reply in this [issue](https://github.com/yuke93/RL-Restore/issues/1). + +### Train +- Download training images + - Download training images (down-sampled DIV2K images) at [Google Drive](https://drive.google.com/file/d/146mmYHcZeWnklQ_Sg7ltCrJVqjL_yB3K/view?usp=sharing) or [Baidu Cloud](https://pan.baidu.com/s/1CD-E5dUMsMswvCVQhe5PeQ). + + - Move the downloaded file to `data/train/` and unzip. + +- Generate training data + - Run `data/train/generate_train.m` to generate training data in HDF5 format. + + - You may generate multiple `.h5` files in `data/train/` + +- Let's train! + + ``` + python main.py --is_train True + ``` + + - When you observe `reward_sum` is increasing, it indicates training is going well. + + - You can visualize reward increasing by TensorBoard. + + + +### Acknowledgement +The DQN algorithm is modified from [DQN-tensorflow](https://github.com/devsisters/DQN-tensorflow). + +### Citation + + @inproceedings{yu2018crafting, + author = {Yu, Ke and Dong, Chao and Lin, Liang and Loy, Chen Change}, + title = {Crafting a Toolchain for Image Restoration by Deep Reinforcement Learning}, + booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition}, + pages={2443--2452}, + year = {2018} + } diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/image_dataset.py b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/image_dataset.py new file mode 100644 index 000000000..74fca2a1e --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/image_dataset.py @@ -0,0 +1,254 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
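+# ----------------------------------------------------------------------------
+# Editorial note: this module is a local copy of Keras' image dataset loading
+# utilities. Its one behavioural change is at the end of
+# `image_dataset_from_directory_static`, which batches with
+# `drop_remainder=True` so every batch has a static shape; the main script
+# selects it when `--drop_remainder=True` is passed (see
+# test/train_performance_1p_static.sh). A usage sketch with placeholder
+# values, mirroring the call in super_resolution_sub_pixel.py:
+#
+#     train_ds = image_dataset_from_directory_static(
+#         "/path/to/BSR/BSDS500/data",   # placeholder path
+#         batch_size=8,
+#         image_size=(300, 300),         # crop_size in the main script
+#         validation_split=0.2,
+#         subset="training",
+#         seed=1337,
+#         label_mode=None,
+#     )
+# ----------------------------------------------------------------------------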
+# ============================================================================== +"""Keras image dataset loading utilities.""" +# pylint: disable=g-classes-have-attributes + +import numpy as np + +from tensorflow.python.data.ops import dataset_ops +from tensorflow.python.keras.layers.preprocessing import image_preprocessing +from tensorflow.python.keras.preprocessing import dataset_utils +from tensorflow.python.keras.preprocessing import image as keras_image_ops +from tensorflow.python.ops import image_ops +from tensorflow.python.ops import io_ops +from tensorflow.python.util.tf_export import keras_export + + +ALLOWLIST_FORMATS = ('.bmp', '.gif', '.jpeg', '.jpg', '.png') + + +@keras_export('keras.preprocessing.image_dataset_from_directory', v1=[]) +def image_dataset_from_directory_static(directory, + labels='inferred', + label_mode='int', + class_names=None, + color_mode='rgb', + batch_size=32, + image_size=(256, 256), + shuffle=True, + seed=None, + validation_split=None, + subset=None, + interpolation='bilinear', + follow_links=False, + smart_resize=False): + """Generates a `tf.data.Dataset` from image files in a directory. + + If your directory structure is: + + ``` + main_directory/ + ...class_a/ + ......a_image_1.jpg + ......a_image_2.jpg + ...class_b/ + ......b_image_1.jpg + ......b_image_2.jpg + ``` + + Then calling `image_dataset_from_directory(main_directory, labels='inferred')` + will return a `tf.data.Dataset` that yields batches of images from + the subdirectories `class_a` and `class_b`, together with labels + 0 and 1 (0 corresponding to `class_a` and 1 corresponding to `class_b`). + + Supported image formats: jpeg, png, bmp, gif. + Animated gifs are truncated to the first frame. + + Args: + directory: Directory where the data is located. + If `labels` is "inferred", it should contain + subdirectories, each containing images for a class. + Otherwise, the directory structure is ignored. + labels: Either "inferred" + (labels are generated from the directory structure), + None (no labels), + or a list/tuple of integer labels of the same size as the number of + image files found in the directory. Labels should be sorted according + to the alphanumeric order of the image file paths + (obtained via `os.walk(directory)` in Python). + label_mode: + - 'int': means that the labels are encoded as integers + (e.g. for `sparse_categorical_crossentropy` loss). + - 'categorical' means that the labels are + encoded as a categorical vector + (e.g. for `categorical_crossentropy` loss). + - 'binary' means that the labels (there can be only 2) + are encoded as `float32` scalars with values 0 or 1 + (e.g. for `binary_crossentropy`). + - None (no labels). + class_names: Only valid if "labels" is "inferred". This is the explict + list of class names (must match names of subdirectories). Used + to control the order of the classes + (otherwise alphanumerical order is used). + color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". + Whether the images will be converted to + have 1, 3, or 4 channels. + batch_size: Size of the batches of data. Default: 32. + image_size: Size to resize images to after they are read from disk. + Defaults to `(256, 256)`. + Since the pipeline processes batches of images that must all have + the same size, this must be provided. + shuffle: Whether to shuffle the data. Default: True. + If set to False, sorts the data in alphanumeric order. + seed: Optional random seed for shuffling and transformations. 
+    validation_split: Optional float between 0 and 1,
+      fraction of data to reserve for validation.
+    subset: One of "training" or "validation".
+      Only used if `validation_split` is set.
+    interpolation: String, the interpolation method used when resizing images.
+      Defaults to `bilinear`. Supports `bilinear`, `nearest`, `bicubic`,
+      `area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.
+    follow_links: Whether to visit subdirectories pointed to by symlinks.
+      Defaults to False.
+    smart_resize: If True, the resizing function used will be
+      `tf.keras.preprocessing.image.smart_resize`, which preserves the aspect
+      ratio of the original image by using a mixture of resizing and cropping.
+      If False (default), the resizing function is `tf.image.resize`, which
+      does not preserve aspect ratio.
+
+  Returns:
+    A `tf.data.Dataset` object.
+      - If `label_mode` is None, it yields `float32` tensors of shape
+        `(batch_size, image_size[0], image_size[1], num_channels)`,
+        encoding images (see below for rules regarding `num_channels`).
+      - Otherwise, it yields a tuple `(images, labels)`, where `images`
+        has shape `(batch_size, image_size[0], image_size[1], num_channels)`,
+        and `labels` follows the format described below.
+
+  Rules regarding labels format:
+    - if `label_mode` is `int`, the labels are an `int32` tensor of shape
+      `(batch_size,)`.
+    - if `label_mode` is `binary`, the labels are a `float32` tensor of
+      1s and 0s of shape `(batch_size, 1)`.
+    - if `label_mode` is `categorical`, the labels are a `float32` tensor
+      of shape `(batch_size, num_classes)`, representing a one-hot
+      encoding of the class index.
+
+  Rules regarding number of channels in the yielded images:
+    - if `color_mode` is `grayscale`,
+      there's 1 channel in the image tensors.
+    - if `color_mode` is `rgb`,
+      there are 3 channels in the image tensors.
+    - if `color_mode` is `rgba`,
+      there are 4 channels in the image tensors.
+  """
+  if labels not in ('inferred', None):
+    if not isinstance(labels, (list, tuple)):
+      raise ValueError(
+          '`labels` argument should be a list/tuple of integer labels, of '
+          'the same size as the number of image files in the target '
+          'directory. If you wish to infer the labels from the subdirectory '
+          'names in the target directory, pass `labels="inferred"`. '
+          'If you wish to get a dataset that only contains images '
+          '(no labels), pass `label_mode=None`.')
+    if class_names:
+      raise ValueError('You can only pass `class_names` if the labels are '
+                       'inferred from the subdirectory names in the target '
+                       'directory (`labels="inferred"`).')
+  if label_mode not in {'int', 'categorical', 'binary', None}:
+    raise ValueError(
+        '`label_mode` argument must be one of "int", "categorical", "binary", '
+        'or None. Received: %s' % (label_mode,))
+  if labels is None or label_mode is None:
+    labels = None
+    label_mode = None
+  if color_mode == 'rgb':
+    num_channels = 3
+  elif color_mode == 'rgba':
+    num_channels = 4
+  elif color_mode == 'grayscale':
+    num_channels = 1
+  else:
+    raise ValueError(
+        '`color_mode` must be one of {"rgb", "rgba", "grayscale"}. 
' + 'Received: %s' % (color_mode,)) + interpolation = image_preprocessing.get_interpolation(interpolation) + dataset_utils.check_validation_split_arg( + validation_split, subset, shuffle, seed) + + if seed is None: + seed = np.random.randint(1e6) + image_paths, labels, class_names = dataset_utils.index_directory( + directory, + labels, + formats=ALLOWLIST_FORMATS, + class_names=class_names, + shuffle=shuffle, + seed=seed, + follow_links=follow_links) + + if label_mode == 'binary' and len(class_names) != 2: + raise ValueError( + 'When passing `label_mode="binary", there must exactly 2 classes. ' + 'Found the following classes: %s' % (class_names,)) + + image_paths, labels = dataset_utils.get_training_or_validation_split( + image_paths, labels, validation_split, subset) + if not image_paths: + raise ValueError('No images found.') + + dataset = paths_and_labels_to_dataset( + image_paths=image_paths, + image_size=image_size, + num_channels=num_channels, + labels=labels, + label_mode=label_mode, + num_classes=len(class_names), + interpolation=interpolation, + smart_resize=smart_resize) + if shuffle: + # Shuffle locally at each iteration + dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed) + dataset = dataset.batch(batch_size,drop_remainder=True) + # Users may need to reference `class_names`. + dataset.class_names = class_names + # Include file paths for images as attribute. + dataset.file_paths = image_paths + return dataset + + +def paths_and_labels_to_dataset(image_paths, + image_size, + num_channels, + labels, + label_mode, + num_classes, + interpolation, + smart_resize=False): + """Constructs a dataset of images and labels.""" + # TODO(fchollet): consider making num_parallel_calls settable + path_ds = dataset_ops.Dataset.from_tensor_slices(image_paths) + args = (image_size, num_channels, interpolation, smart_resize) + img_ds = path_ds.map( + lambda x: load_image(x, *args)) + if label_mode: + label_ds = dataset_utils.labels_to_dataset(labels, label_mode, num_classes) + img_ds = dataset_ops.Dataset.zip((img_ds, label_ds)) + return img_ds + + +def load_image(path, image_size, num_channels, interpolation, + smart_resize=False): + """Load an image from a path and resize it.""" + img = io_ops.read_file(path) + img = image_ops.decode_image( + img, channels=num_channels, expand_animations=False) + if smart_resize: + img = keras_image_ops.smart_resize(img, image_size, + interpolation=interpolation) + else: + img = image_ops.resize_images_v2(img, image_size, method=interpolation) + img.set_shape((image_size[0], image_size[1], num_channels)) + return img diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/modelzoo_level.txt new file mode 100644 index 000000000..a829ab59b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/modelzoo_level.txt @@ -0,0 +1,3 @@ +FuncStatus:OK +PerfStatus:NOK +PrecisionStatus:OK \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/super_resolution_sub_pixel.py 
b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/super_resolution_sub_pixel.py new file mode 100644 index 000000000..045f7a9cc --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/super_resolution_sub_pixel.py @@ -0,0 +1,508 @@ +""" +Title: Image Super-Resolution using an Efficient Sub-Pixel CNN +Author: [Xingyu Long](https://github.com/xingyu-long) +Date created: 2020/07/28 +Last modified: 2020/08/27 +Description: Implementing Super-Resolution using Efficient sub-pixel model on BSDS500. +""" + +""" +## Introduction + +ESPCN (Efficient Sub-Pixel CNN), proposed by [Shi, 2016](https://arxiv.org/abs/1609.05158) +is a model that reconstructs a high-resolution version of an image given a low-resolution version. +It leverages efficient "sub-pixel convolution" layers, which learns an array of +image upscaling filters. + +In this code example, we will implement the model from the paper and train it on a small dataset, +[BSDS500](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html). +""" + +""" +## Setup +""" + +import npu_device +import ast +import argparse + + +def init_arg(): + parser = argparse.ArgumentParser() + parser.add_argument('--data_dir', type=str, default='datasets/BSR', help='dataset path') + parser.add_argument('--batch_size', type=int, default=8, help='batch_size') + parser.add_argument('--epochs', type=int, default=100, help='batch_size') + parser.add_argument('--precision_mode', default="allow_mix_precision", type=str, + help='the path to save over dump data') + parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, + help='if or not over detection, default is False') + parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, + help='data dump flag, default is False') + parser.add_argument('--data_dump_step', default="10", + help='data dump step, default is 10') + parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, + help='if or not profiling for performance debug, default is False') + parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data') + parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data') + parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data') + parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, + help='use_mixlist flag, default is False') + parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, + help='fusion_off flag, default is False') + parser.add_argument('--mixlist_file', default="ops_info.json", type=str, + help='mixlist file name, default is ops_info.json') + parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, + help='fusion_off file name, default is fusion_switch.cfg') + parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, + help='auto_tune flag, default is False') + parser.add_argument('--drop_remainder', dest='drop_remainder', type=ast.literal_eval, + help='drop_remainder flag, default is False') + + return parser.parse_args() + + +args = init_arg() + + +def npu_config(): + if args.data_dump_flag: + npu_device.global_options().dump_config.enable_dump = True + npu_device.global_options().dump_config.dump_path = args.data_dump_path + npu_device.global_options().dump_config.dump_step = 
args.data_dump_step + npu_device.global_options().dump_config.dump_mode = "all" + + if args.over_dump: + npu_device.global_options().dump_config.enable_dump_debug = True + npu_device.global_options().dump_config.dump_path = args.over_dump_path + npu_device.global_options().dump_config.dump_debug_mode = "all" + + if args.profiling: + npu_device.global_options().profiling_config.enable_profiling = True + profiling_options = '{"output":"' + args.profiling_dump_path + '", \ + "training_trace":"on", \ + "task_trace":"on", \ + "aicpu":"on", \ + "aic_metrics":"PipeUtilization",\ + "fp_point":"", \ + "bp_point":""}' + npu_device.global_options().profiling_config.profiling_options = profiling_options + npu_device.global_options().precision_mode = args.precision_mode + if args.use_mixlist and args.precision_mode == 'allow_mix_precision': + npu_device.global_options().modify_mixlist = "../configs/" + args.mixlist_file + if args.fusion_off_flag: + npu_device.global_options().fusion_switch_file = "../configs/" + args.fusion_off_file + if args.auto_tune: + npu_device.global_options().auto_tune_mode = "RL,GA" + npu_device.open().as_default() + + +npu_config() + +import tensorflow as tf + +import os +import math +import numpy as np + +from tensorflow import keras +from tensorflow.keras import layers +from tensorflow.keras.preprocessing.image import load_img +from tensorflow.keras.preprocessing.image import array_to_img +from tensorflow.keras.preprocessing.image import img_to_array +from tensorflow.keras.preprocessing import image_dataset_from_directory +from image_dataset import image_dataset_from_directory_static + +from IPython.display import display + + +""" +## Load data: BSDS500 dataset + +### Download dataset + +We use the built-in `keras.utils.get_file` utility to retrieve the dataset. +""" + +data_dir = args.data_dir + '/datasets/BSR' +# dataset_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz" +# data_dir = keras.utils.get_file(origin=dataset_url, fname="BSR", untar=True) +root_dir = os.path.join(data_dir, "BSDS500/data") + +""" +We create training and validation datasets via `image_dataset_from_directory`. +""" + +crop_size = 300 +upscale_factor = 3 +input_size = crop_size // upscale_factor +batch_size = args.batch_size + +if args.drop_remainder: + train_ds = image_dataset_from_directory_static( + root_dir, + batch_size=batch_size, + image_size=(crop_size, crop_size), + validation_split=0.2, + subset="training", + seed=1337, + label_mode=None, + ) + + valid_ds = image_dataset_from_directory_static( + root_dir, + batch_size=batch_size, + image_size=(crop_size, crop_size), + validation_split=0.2, + subset="validation", + seed=1337, + label_mode=None, + ) +else: + train_ds = image_dataset_from_directory( + root_dir, + batch_size=batch_size, + image_size=(crop_size, crop_size), + validation_split=0.2, + subset="training", + seed=1337, + label_mode=None, + ) + + valid_ds = image_dataset_from_directory( + root_dir, + batch_size=batch_size, + image_size=(crop_size, crop_size), + validation_split=0.2, + subset="validation", + seed=1337, + label_mode=None, + ) + +""" +We rescale the images to take values in the range [0, 1]. 
+""" + + +def scaling(input_image): + input_image = input_image / 255.0 + return input_image + + +# Scale from (0, 255) to (0, 1) +train_ds = train_ds.map(scaling) +valid_ds = valid_ds.map(scaling) + +""" +Let's visualize a few sample images: +""" + +#for batch in train_ds.take(1): +# for img in batch: +# display(array_to_img(img)) + +""" +We prepare a dataset of test image paths that we will use for +visual evaluation at the end of this example. +""" + +dataset = os.path.join(root_dir, "images") +test_path = os.path.join(dataset, "test") + +test_img_paths = sorted( + [ + os.path.join(test_path, fname) + for fname in os.listdir(test_path) + if fname.endswith(".jpg") + ] +) + +""" +## Crop and resize images + +Let's process image data. +First, we convert our images from the RGB color space to the +[YUV colour space](https://en.wikipedia.org/wiki/YUV). + +For the input data (low-resolution images), +we crop the image, retrieve the `y` channel (luninance), +and resize it with the `area` method (use `BICUBIC` if you use PIL). +We only consider the luminance channel +in the YUV color space because humans are more sensitive to +luminance change. + +For the target data (high-resolution images), we just crop the image +and retrieve the `y` channel. +""" + + +# Use TF Ops to process. +def process_input(input, input_size, upscale_factor): + input = tf.image.rgb_to_yuv(input) + last_dimension_axis = len(input.shape) - 1 + y, u, v = tf.split(input, 3, axis=last_dimension_axis) + return tf.image.resize(y, [input_size, input_size], method="area") + + +def process_target(input): + input = tf.image.rgb_to_yuv(input) + last_dimension_axis = len(input.shape) - 1 + y, u, v = tf.split(input, 3, axis=last_dimension_axis) + return y + + +train_ds = train_ds.map( + lambda x: (process_input(x, input_size, upscale_factor), process_target(x)) +) + +train_ds = train_ds.prefetch(buffer_size=32) +valid_ds = valid_ds.map( + lambda x: (process_input(x, input_size, upscale_factor), process_target(x)) +) +valid_ds = valid_ds.prefetch(buffer_size=32) + +""" +for batch in train_ds.take(1): + for img in batch[0]: + display(array_to_img(img)) + for img in batch[1]: + display(array_to_img(img)) +""" + +def get_model(upscale_factor=3, channels=1): + conv_args = { + "activation": "relu", + "kernel_initializer": "Orthogonal", + "padding": "same", + } + inputs = keras.Input(shape=(None, None, channels)) + x = layers.Conv2D(64, 5, **conv_args)(inputs) + x = layers.Conv2D(64, 3, **conv_args)(x) + x = layers.Conv2D(32, 3, **conv_args)(x) + x = layers.Conv2D(channels * (upscale_factor ** 2), 3, **conv_args)(x) + outputs = tf.nn.depth_to_space(x, upscale_factor) + + return keras.Model(inputs, outputs) + + +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes +from mpl_toolkits.axes_grid1.inset_locator import mark_inset +import PIL + + +def plot_results(img, prefix, title): + img_array = img_to_array(img) + img_array = img_array.astype("float32") / 255.0 + + # Create a new figure with a default 111 subplot. + fig, ax = plt.subplots() + im = ax.imshow(img_array[::-1], origin="lower") + + plt.title(title) + # zoom-factor: 2.0, location: upper-left + axins = zoomed_inset_axes(ax, 2, loc=2) + axins.imshow(img_array[::-1], origin="lower") + + # Specify the limits. + x1, x2, y1, y2 = 200, 300, 100, 200 + # Apply the x-limits. + axins.set_xlim(x1, x2) + # Apply the y-limits. + axins.set_ylim(y1, y2) + + plt.yticks(visible=False) + plt.xticks(visible=False) + + # Make the line. 
+ mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="blue") + plt.savefig("./output/" + str(prefix) + "-" + title + ".png") + plt.show() + + +def get_lowres_image(img, upscale_factor): + return img.resize( + (img.size[0] // upscale_factor, img.size[1] // upscale_factor), + PIL.Image.BICUBIC, + ) + + +def upscale_image(model, img): + """Predict the result based on input image and restore the image as RGB.""" + ycbcr = img.convert("YCbCr") + y, cb, cr = ycbcr.split() + y = img_to_array(y) + y = y.astype("float32") / 255.0 + + input = np.expand_dims(y, axis=0) + out = model.predict(input) + + out_img_y = out[0] + out_img_y *= 255.0 + + # Restore the image in RGB color space. + out_img_y = out_img_y.clip(0, 255) + out_img_y = out_img_y.reshape((np.shape(out_img_y)[0], np.shape(out_img_y)[1])) + out_img_y = PIL.Image.fromarray(np.uint8(out_img_y), mode="L") + out_img_cb = cb.resize(out_img_y.size, PIL.Image.BICUBIC) + out_img_cr = cr.resize(out_img_y.size, PIL.Image.BICUBIC) + out_img = PIL.Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert( + "RGB" + ) + return out_img + + +""" +## Define callbacks to monitor training + +The `ESPCNCallback` object will compute and display +the [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) metric. +This is the main metric we use to evaluate super-resolution performance. +""" + + +class ESPCNCallback(keras.callbacks.Callback): + def __init__(self): + super(ESPCNCallback, self).__init__() + self.test_img = get_lowres_image(load_img(test_img_paths[0]), upscale_factor) + + # Store PSNR value in each epoch. + def on_epoch_begin(self, epoch, logs=None): + self.psnr = [] + + def on_epoch_end(self, epoch, logs=None): + print("Mean PSNR for epoch: %.2f" % (np.mean(self.psnr))) + if epoch % 20 == 0: + prediction = upscale_image(self.model, self.test_img) + # plot_results(prediction, "epoch-" + str(epoch), "prediction") + + def on_test_batch_end(self, batch, logs=None): + self.psnr.append(10 * math.log10(1 / logs["loss"])) + + +import time + + +class TimeHistory(tf.keras.callbacks.Callback): + def __init__(self, batch_size, log_steps, initial_step=0): + self.batch_size = batch_size + super(TimeHistory, self).__init__() + self.steps_before_epoch = initial_step + self.last_log_step = initial_step + self.log_steps = log_steps + self.steps_in_epoch = 0 + self.start_time = None + + @property + def global_steps(self): + """The current 1-indexed global step.""" + return self.steps_before_epoch + self.steps_in_epoch + + def on_epoch_begin(self, epoch, logs=None): + if not self.start_time: + self.start_time = time.time() + self.epoch_start = time.time() + + def on_batch_begin(self, batch, logs=None): + if not self.start_time: + self.start_time = time.time() + + def on_batch_end(self, batch, logs=None): + self.steps_in_epoch = batch + 1 + steps_since_last_log = self.global_steps - self.last_log_step + if steps_since_last_log >= self.log_steps: + now = time.time() + elapsed_time = now - self.start_time + steps_per_second = steps_since_last_log / elapsed_time + examples_per_second = steps_per_second * self.batch_size + print( + 'TimeHistory: %.2f seconds, %.2f examples/second between steps %d ' + 'and %d' % (elapsed_time, examples_per_second, self.last_log_step, + self.global_steps), flush=True) + self.last_log_step = self.global_steps + self.start_time = None + + def on_epoch_end(self, epoch, logs=None): + epoch_run_time = time.time() - self.epoch_start + self.steps_before_epoch += self.steps_in_epoch + self.steps_in_epoch = 0 + + +""" +Define 
`ModelCheckpoint` and `EarlyStopping` callbacks. +""" + +early_stopping_callback = keras.callbacks.EarlyStopping(monitor="loss", patience=10) + +""" +checkpoint_filepath = "./ckpt/checkpoint" + +model_checkpoint_callback = keras.callbacks.ModelCheckpoint( + filepath=checkpoint_filepath, + save_weights_only=True, + monitor="loss", + mode="min", + save_best_only=True, +) +""" +model = get_model(upscale_factor=upscale_factor, channels=1) +model.summary() + +callbacks = [ESPCNCallback(), early_stopping_callback, TimeHistory(batch_size, 10)] # , model_checkpoint_callback] +loss_fn = keras.losses.MeanSquaredError() +optimizer = keras.optimizers.Adam(learning_rate=0.001) + +""" +## Train the model +""" + +epochs = args.epochs +model.compile(optimizer=optimizer, loss=loss_fn) +start_time = time.time() +model.fit( + train_ds, epochs=epochs, callbacks=callbacks, validation_data=valid_ds, verbose=2 +) + +Average_Duration = time.time() - start_time +print(f'{args.epochs} Epoch: {Average_Duration / args.epochs} s/epoch') + +# save model +# model.save_weights(checkpoint_filepath) +# The model weights (that are considered the best) are loaded into the model. +# model.load_weights(checkpoint_filepath) + +""" +## Run model prediction and plot the results + +Let's compute the reconstructed version of a few images and save the results. +""" + +total_bicubic_psnr = 0.0 +total_test_psnr = 0.0 + +for index, test_img_path in enumerate(test_img_paths[50:60]): + img = load_img(test_img_path) + lowres_input = get_lowres_image(img, upscale_factor) + w = lowres_input.size[0] * upscale_factor + h = lowres_input.size[1] * upscale_factor + highres_img = img.resize((w, h)) + prediction = upscale_image(model, lowres_input) + lowres_img = lowres_input.resize((w, h)) + lowres_img_arr = img_to_array(lowres_img) + highres_img_arr = img_to_array(highres_img) + predict_img_arr = img_to_array(prediction) + bicubic_psnr = tf.image.psnr(lowres_img_arr, highres_img_arr, max_val=255) + test_psnr = tf.image.psnr(predict_img_arr, highres_img_arr, max_val=255) + + total_bicubic_psnr += bicubic_psnr + total_test_psnr += test_psnr + + print( + "PSNR of low resolution image and high resolution image is %.4f" % bicubic_psnr + ) + print("PSNR of predict and high resolution is %.4f" % test_psnr) + plot_results(lowres_img, index, "lowres") + plot_results(highres_img, index, "highres") + plot_results(prediction, index, "prediction") + +print("Avg. PSNR of lowres images is %.4f" % (total_bicubic_psnr / 10)) +print("Avg. 
PSNR of reconstructions is %.4f" % (total_test_psnr / 10)) diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..22e70688d --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,186 @@ +#!/bin/bash + +cur_path=`pwd`/../ +#失败用例打屏 +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=8 +#网络名称,同目录名称 +Network="super_resolution_sub_pixel_ID2541_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=100 +#训练step +#train_steps=5 +#学习率 +#learning_rate=1e-4 +#ckpt_path="" +#参数配置 +data_path="" +#work_dir="$cur_path/estimator_working_dir" +#export_path="$cur_path/outputs/models/000001-first_generation" +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage:./train_performance_1p.sh --data_path=$data_path" + exit 1 +fi + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + + fi +done +############维测参数############## + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +cd $cur_path/ +mkdir output +mkdir ckpt + +##############执行训练########## +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/$ASCEND_DEVICE_ID + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + + +start=$(date +%s) +nohup python3 super_resolution_sub_pixel.py \ + --data_dir=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + 
--fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait +end=$(date +%s) +e2e_time=$(( $end - $start )) + + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +FPS=`grep TimeHistory $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tail -n +2|awk '{print $4}'|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g` +wait +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${FPS}'}'` + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep val_loss|awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep PSNR|awk '{print $6}'|tail -1` +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +###下面字段用于冒烟看护 +BatchSize=${batch_size} +#设备类型,自动获取 +DeviceType=`uname -m` +#用例名称,自动获取 +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc' + +#steps="50" +#time=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep val_loss|awk '{print $3}'|tail -n1 |awk -F 's' '{print $1}'` +#step_per_s=`awk 'BEGIN{printf "%2f\n",'${steps}'/'${time}'}'` +#FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${step_per_s}'}'` + +echo "Final Performance images/sec : $FPS" + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $6}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +##获取错误信息 +#系统错误信息 +#ModelStatus="图执行FAIL" +#error_msg="EZ3002" +#判断错误信息是否和历史状态一致,此处无需修改 +#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel" +#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#DTS单号或者issue链接 +#DTS_Number="DTS2021090622224" + +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..b15fdedba --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,187 @@ +#!/bin/bash + +cur_path=`pwd`/../ +#失败用例打屏 +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=8 +#网络名称,同目录名称 +Network="super_resolution_sub_pixel_ID2541_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=2 +#训练step +#train_steps=5 +#学习率 +#learning_rate=1e-4 +#ckpt_path="" +#参数配置 +data_path="" +#work_dir="$cur_path/estimator_working_dir" +#export_path="$cur_path/outputs/models/000001-first_generation" +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage:./train_performance_1p.sh --data_path=$data_path" + exit 1 +fi + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + + fi +done +############维测参数############## + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +cd $cur_path/ +mkdir output +mkdir ckpt + +##############执行训练########## +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/$ASCEND_DEVICE_ID + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + + +start=$(date +%s) +nohup python3 super_resolution_sub_pixel.py \ + --data_dir=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + 
--over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} \ + --drop_remainder=False > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait +end=$(date +%s) +e2e_time=$(( $end - $start )) + + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +FPS=`grep TimeHistory $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tail -n +2|awk '{print $4}'|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g` +wait +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${FPS}'}'` + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep val_loss|awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +#train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep PSNR|grep epoch|awk '{print $5}'|tail -n1` +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +###下面字段用于冒烟看护 +BatchSize=${batch_size} +#设备类型,自动获取 +DeviceType=`uname -m` +#用例名称,自动获取 +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +#steps="50" +#time=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep val_loss|awk '{print $3}'|tail -n1 |awk -F 's' '{print $1}'` +#step_per_s=`awk 'BEGIN{printf "%2f\n",'${steps}'/'${time}'}'` +#FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${step_per_s}'}'` + +echo "Final Performance images/sec : $FPS" + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep loss|awk '{print $6}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +##获取错误信息 +#系统错误信息 +#ModelStatus="图执行FAIL" +#error_msg="EZ3002" +#判断错误信息是否和历史状态一致,此处无需修改 +#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel" +#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#DTS单号或者issue链接 +#DTS_Number="DTS2021090622224" + +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo 
"ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..8adbcb65c --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/super_resolution_sub_pixel_ID2541_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,187 @@ +#!/bin/bash + +cur_path=`pwd`/../ +#失败用例打屏 +#export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +#基础参数,需要模型审视修改 +#Batch Size +batch_size=8 +#网络名称,同目录名称 +Network="super_resolution_sub_pixel_ID2541_for_TensorFlow2.X" +#Device数量,单卡默认为1 +RANK_SIZE=1 +#训练epoch,可选 +train_epochs=2 +#训练step +#train_steps=5 +#学习率 +#learning_rate=1e-4 +#ckpt_path="" +#参数配置 +data_path="" +#work_dir="$cur_path/estimator_working_dir" +#export_path="$cur_path/outputs/models/000001-first_generation" +############维测参数############## +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump #此处cur_path为代码根目录 + mkdir -p ${over_dump_path} +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" +auto_tune=False +############维测参数############## + +if [[ $1 == --help || $1 == --h ]];then + echo "usage:./train_performance_1p.sh --data_path=$data_path" + exit 1 +fi + +############维测参数############## +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/test/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/test/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/test/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + elif [[ $para == --auto_tune* ]];then + auto_tune=`echo ${para#*=}` + + fi +done +############维测参数############## + +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be config" + exit 1 +fi + +cd $cur_path/ +mkdir output +mkdir ckpt + +##############执行训练########## +if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/$ASCEND_DEVICE_ID + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID +fi +wait + + 
+start=$(date +%s) +nohup python3 super_resolution_sub_pixel.py \ + --data_dir=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --drop_remainder=True \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} \ + --auto_tune=${auto_tune} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +wait +end=$(date +%s) +e2e_time=$(( $end - $start )) + + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +FPS=`grep TimeHistory $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log| tail -n 3|awk '{print $4}'|awk '{sum+=$1} END {print"",sum/NR}'|sed s/[[:space:]]//g` +wait +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'/'${FPS}'}'` + +#cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep val_loss|awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +#train_accuracy=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep PSNR|grep epoch|awk '{print $5}'|tail -n1` +#echo "Final Performance ms/step : $average_perf" +echo "Final Training Duration sec : $e2e_time" + +###下面字段用于冒烟看护 +BatchSize=${batch_size} +#设备类型,自动获取 +DeviceType=`uname -m` +#用例名称,自动获取 +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf' + +#steps="50" +#time=`cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|grep val_loss|awk '{print $3}'|tail -n1 |awk -F 's' '{print $1}'` +#step_per_s=`awk 'BEGIN{printf "%2f\n",'${steps}'/'${time}'}'` +#FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${step_per_s}'}'` + +echo "Final Performance images/sec : $FPS" + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 + +cat $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log |grep val_loss|awk '{print $6}'|sed s/[[:space:]]//g > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt + +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` +##获取错误信息 +#系统错误信息 +#ModelStatus="图执行FAIL" +#error_msg="EZ3002" +#判断错误信息是否和历史状态一致,此处无需修改 +#error_msg="Graph engine process graph failed: EZ3002: Optype \[Conv2DBackpropFilter\] of Ops kernel" +#Status=`grep "${error_msg}" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|wc -l` +#DTS单号或者issue链接 +#DTS_Number="DTS2021090622224" + +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "ModelStatus = ${ModelStatus}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "DTS_Number = ${DTS_Number}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "Status = ${Status}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +#echo "error_msg = ${error_msg}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From e240a77f3753c00a48f428fbc7f2c380222cb1f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:21:53 +0000 Subject: [PATCH 50/54] =?UTF-8?q?text=5Fclassification=5Fwith=5Ftransforme?= =?UTF-8?q?r=5FID2563=5Ffor=5FTensorFlow2.X=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../LICENSE | 284 +++++++++++++++ .../README.md | 192 ++++++++++ .../npu_convert_dropout.py | 54 +++ .../npu_ops.py | 256 ++++++++++++++ .../requirements.txt | 0 .../test/train_full_1p.sh | 178 ++++++++++ .../test/train_performance_1p.sh | 178 ++++++++++ .../test/train_performance_1p_dynamic.sh | 179 ++++++++++ .../test/train_performance_1p_static.sh | 178 ++++++++++ .../text_classification_with_transformer.py | 328 ++++++++++++++++++ 10 files changed, 1827 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_convert_dropout.py create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_ops.py create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_static.sh create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/text_classification_with_transformer.py diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + 
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/README.md new file mode 100644 index 000000000..367d52351 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/README.md @@ -0,0 +1,192 @@ +- [基本信息](#基本信息.md) +- [概述](#概述.md) +- [训练环境准备](#训练环境准备.md) +- [快速上手](#快速上手.md) +- [迁移学习指导](#迁移学习指导.md) +- [高级参考](#高级参考.md) +
+<h2 id="基本信息.md">基本信息</h2>
+
+**发布者(Publisher):Huawei**
+
+**应用领域(Application Domain):Natural Language Processing**
+
+**版本(Version):1.1**
+
+**修改时间(Modified):2022.02.16**
+
+**大小(Size):90M**
+
+**框架(Framework):TensorFlow_2.4.1**
+
+**模型格式(Model Format):ckpt**
+
+**精度(Precision):Mixed**
+
+**处理器(Processor):昇腾910**
+
+**应用级别(Categories):Research**
+
+**描述(Description):使用Transformer进行文本分类**
+
+<h2 id="概述.md">概述</h2>
+
+
+    Transformer是一个能够高效并行训练的序列到序列模型,分为编码器(Encoder)和解码器(Decoder)两个部分,主要由多头注意力(Multi-head Attention)网络和前向(FeedForward)网络组成,同时集成了位置编码(Position Encoding)、残差连接(Residual Connection)、层归一化(Layer Normalization)等多种技术。相比循环和卷积网络,注意力网络可以同时对任意两个输入位置的向量进行运算,计算效率更高;在包括机器翻译在内的多个自然语言处理任务上,该网络都取得了显著提升。
+
+- 参考论文:
+
+    [Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NIPS 2017, pages 5998–6008.](https://arxiv.org/abs/1706.03762)
+
+- 参考实现:
+
+    https://github.com/keras-team/keras-io/blob/master/examples/nlp/text_classification_with_transformer.py
+
+- 适配昇腾 AI 处理器的实现:
+
+    https://gitee.com/ascend/modelzoo/tree/master/built-in/TensorFlow2/Research/nlp/text_classification_with_transformer_ID2563_for_TensorFlow2.X
+
+- 通过Git获取对应commit_id的代码方法如下:
+
+    git clone {repository_url}        # 克隆仓库的代码
+    cd {repository_name}              # 切换到模型的代码仓目录
+    git checkout {branch}             # 切换到对应分支
+    git reset --hard {commit_id}      # 代码设置到对应的commit_id
+    cd {code_path}                    # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换
+
+## 默认配置
+
+- 主要训练超参(单卡):
+  - batch_size: 32
+  - epochs: 2
+  - lr: 0.001
+
+## 支持特性
+
+| 特性列表   | 是否支持 |
+| ---------- | -------- |
+| 分布式训练 | 否       |
+| 混合精度   | 是       |
+| 数据并行   | 否       |
+
+## 混合精度训练
+
+昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。
+
+## 开启混合精度
+
+```
+  npu_device.global_options().precision_mode='allow_mix_precision'
+  npu_device.open().as_default()
+```
+
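+上述两行即本仓库开启混合精度的全部配置。下面给出一段最小示意,说明它在训练脚本入口处的典型调用顺序(`npu_device` 来自昇腾 TensorFlow2 适配插件;示例末尾的占位模型为假设性代码,仅用于说明先配置、后建模的顺序):
+
+```
+import npu_device
+from tensorflow import keras
+
+# 先设置精度模式,再初始化并绑定 NPU 设备
+npu_device.global_options().precision_mode = 'allow_mix_precision'
+npu_device.open().as_default()
+
+# 此后按常规方式构建、编译 Keras 模型,float32 算子会按内置策略自动降为 float16
+model = keras.Sequential([keras.layers.Dense(10)])   # 假设性的占位模型
+model.compile(optimizer="adam", loss="mse")
+```
+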
+<h2 id="训练环境准备.md">训练环境准备</h2>
+
+1. 硬件环境准备请参见各硬件产品文档"[驱动和固件安装升级指南](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)"。需要在硬件设备上安装与CANN版本配套的固件与驱动。
+2. 宿主机上需要安装Docker并登录[Ascend Hub中心](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm)获取镜像。
+
+    当前模型支持的镜像列表如[表1](#zh-cn_topic_0000001074498056_table1519011227314)所示。
+
+    **表 1** 镜像列表
+
+    | 镜像名称 | 镜像版本 | 配套CANN版本 |
+    | -------------------------------------------------------------------------------------------- | -------- | ------------ |
+    | [ascend-tensorflow-arm](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) | 21.0.2 | 5.0.2 |
+
+<h2 id="快速上手.md">快速上手</h2>
+ +## 数据集准备 + +1. 用户需自行下载keras.datasets.imdb训练数据集。 + +## 模型训练 + +- 单击“立即下载”,并选择合适的下载方式下载源码包。 +- 开始训练 + + 1. 启动训练之前,首先要配置程序运行相关环境变量。 + + 环境变量配置信息参见: + + [Ascend 910训练平台环境变量设置](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819) + + 2. 单卡训练 + + 2.1 配置train_full_1p.sh脚本中`data_path`(脚本路径text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_full_1p.sh),请用户根据实际路径配置,数据集参数如下所示: + + --data_path=/path/to/dataset/imdb.npz + + 2.2 1p指令如下: + + bash train_full_1p.sh --data_path=/path/to/dataset/imdb.npz + + +
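+上面的 `--data_path` 指向本地的 imdb.npz 数据文件。作为参考,下面给出一段加载该数据集的最小示意(`vocab_size`、`maxlen` 等取值为示例假设,实际处理逻辑以 text_classification_with_transformer.py 为准):
+
+```
+from tensorflow import keras
+
+vocab_size = 20000   # 示例假设:仅保留词频最高的 2 万个词
+maxlen = 200         # 示例假设:每条评论只保留前 200 个词
+
+# path 指向本地已存在的 imdb.npz 时,load_data 直接读取该文件,不再联网下载
+(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(
+    path="/path/to/dataset/imdb.npz", num_words=vocab_size)
+
+# 统一各条样本的序列长度,便于以固定 shape 训练
+x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
+x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)
+```
+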
+<h2 id="迁移学习指导.md">迁移学习指导</h2>
+ +- 数据集准备。 + + 1. 获取数据。 + 请参见“快速上手”中的数据集准备。 + +- 模型训练。 + + 参考“模型训练”中训练步骤。 + +- 模型评估。 + + 参考“模型训练”中验证步骤。 + +
+<h2 id="高级参考.md">高级参考</h2>
+ +## 脚本和示例代码 + +``` +text_classification_with_transformer_ID2563_for_TensorFlow2.X/ +├── LICENSE +├── modelzoo_level.txt +├── README.md +├── requirements.txt +├── test +│   ├── train_full_1p.sh +│   ├── train_performance_1p.sh +└── text_classification_with_transformer.py + +``` + +## 脚本参数 + +``` +--data_dir 训练数据集路径 +--epochs 训练epoch设置 +--batch_size 训练bs设置 +``` + +## 训练过程 + +1. 通过“模型训练”中的训练指令启动单卡训练。 +2. 将训练脚本(train_full_1p.sh)中的data_path设置为训练数据集的路径。具体的流程参见“模型训练”的示例。 +3. 模型存储路径为“curpath/output/ASCEND_DEVICE_ID”,包括训练的log文件。 +4. 以多卡训练为例,loss信息在文件curpath/output/{ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log中。 + +## 推理/验证过程 + +``` + NA + +``` diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_convert_dropout.py b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_convert_dropout.py new file mode 100644 index 000000000..95f8689ce --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_convert_dropout.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from keras import backend +from keras.utils import control_flow_util +from keras.layers.core import Dropout +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import nn +import npu_ops + +def dropout_call(self, inputs, training=None): + """Make Keras Dropout to execute NPU dropout""" + if training is None: + training = backend.learning_phase() + + def dropped_inputs(): + return npu_ops.dropout( + inputs, + noise_shape=self._get_noise_shape(inputs), + seed=self.seed, + keep_prob=1 - self.rate) + + output = control_flow_util.smart_cond(training, + dropped_inputs, + lambda : array_ops.identity(inputs)) + + return output + +Dropout.call = dropout_call diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_ops.py b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_ops.py new file mode 100644 index 000000000..fa6f8f211 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/npu_ops.py @@ -0,0 +1,256 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Ops for collective operations implemented using hccl.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import numbers +from tensorflow.python.ops import array_ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.framework import ops +from tensorflow.python.eager import context + +from npu_device import gen_npu_ops + + +DEFAULT_GRAPH_SEED = 87654321 +_MAXINT32 = 2**31 - 1 +def LARSV2(input_weight, + input_grad, + weight_decay, + learning_rate, + hyperpara=0.001, + epsilon=0.00001, + use_clip=False, + name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.LARSV2() is not compatible with " + "eager execution.") + + return gen_npu_ops.lars_v2(input_weight=input_weight, + input_grad=input_grad, + weight_decay=weight_decay, + learning_rate=learning_rate, + hyperpara=hyperpara, + epsilon=epsilon, + use_clip=use_clip, + name=name) + + +def _truncate_seed(seed): + return seed % _MAXINT32 # Truncate to fit into 32-bit integer + +def get_seed(op_seed): + global_seed = ops.get_default_graph().seed + + if global_seed is not None: + if op_seed is None: + op_seed = ops.get_default_graph()._last_id + + seeds = _truncate_seed(global_seed), _truncate_seed(op_seed) + else: + if op_seed is not None: + seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed) + else: + seeds = None, None + # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would + # be unexpected since Python docs say nondeterminism is (None, None). + if seeds == (0, 0): + return (0, _MAXINT32) + return seeds + +def _get_noise_shape(x, noise_shape): + # If noise_shape is none return immediately. + if noise_shape is None: + return array_ops.shape(x) + + try: + # Best effort to figure out the intended shape. + # If not possible, let the op to handle it. + # In eager mode exception will show up. 
+ noise_shape_ = tensor_shape.as_shape(noise_shape) + except (TypeError, ValueError): + return noise_shape + + if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims): + new_dims = [] + for i, dim in enumerate(x.shape.dims): + if noise_shape_.dims[i].value is None and dim.value is not None: + new_dims.append(dim.value) + else: + new_dims.append(noise_shape_.dims[i].value) + return tensor_shape.TensorShape(new_dims) + + return noise_shape + +def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): + """The gradient for `gelu`. + + Args: + x: A tensor with type is float. + keep_prob: A tensor, float, rate of every element reserved. + noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random + generated. + seed: Random seed. + name: Layer name. + + Returns: + A tensor. + """ + if context.executing_eagerly(): + raise RuntimeError("tf.dropout() is not compatible with " + "eager execution.") + x = ops.convert_to_tensor(x, name="x") + if not x.dtype.is_floating: + raise ValueError("x has to be a floating point tensor since it's going to" + " be scaled. Got a %s tensor instead." % x.dtype) + if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1: + raise ValueError("keep_prob must be a scalar tensor or a float in the " + "range (0, 1], got %g" % keep_prob) + if isinstance(keep_prob, float) and keep_prob == 1: + return x + seed, seed2 = get_seed(seed) + noise_shape = _get_noise_shape(x, noise_shape) + gen_out = gen_npu_ops.drop_out_gen_mask(noise_shape, keep_prob, seed, seed2, name) + result = gen_npu_ops.drop_out_do_mask(x, gen_out, keep_prob, name) + return result + +@ops.RegisterGradient("DropOutDoMask") +def _DropOutDoMaskGrad(op, grad): + result = gen_npu_ops.drop_out_do_mask(grad, op.inputs[1], op.inputs[2]) + return [result, None, None] + +def basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple, + activation, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.basic_lstm_cell() is not compatible with " + "eager execution.") + x = ops.convert_to_tensor(x, name="x") + h = ops.convert_to_tensor(h, name="h") + c = ops.convert_to_tensor(c, name="c") + w = ops.convert_to_tensor(w, name="w") + b = ops.convert_to_tensor(b, name="b") + result = gen_npu_ops.basic_lstm_cell(x, h, c, w, b, keep_prob, forget_bias, state_is_tuple, + activation, name) + return result + +@ops.RegisterGradient("BasicLSTMCell") +def basic_lstm_cell_grad(op, dct, dht, dit, djt, dft, dot, dtanhct): + + dgate, dct_1 = gen_npu_ops.basic_lstm_cell_c_state_grad(op.inputs[2], dht, dct, op.outputs[2], op.outputs[3], op.outputs[4], op.outputs[5], op.outputs[6], forget_bias=op.get_attr("forget_bias"), activation=op.get_attr("activation")) + dw, db = gen_npu_ops.basic_lstm_cell_weight_grad(op.inputs[0], op.inputs[1], dgate) + dxt, dht = gen_npu_ops.basic_lstm_cell_input_grad(dgate, op.inputs[3], keep_prob=op.get_attr("keep_prob")) + + return [dxt, dht, dct_1, dw, db] + +def adam_apply_one_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, add2_y, name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.adam_apply_one_assign() is not compatible with " + "eager execution.") + result = gen_npu_ops.adam_apply_one_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, add2_y,name) + return result + +def adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name=None): + if context.executing_eagerly(): + raise 
RuntimeError("tf.adam_apply_one_with_decay_assign() is not compatible with " + "eager execution.") + result = gen_npu_ops.adam_apply_one_with_decay_assign(input0, input1, input2, input3, input4, + mul0_x, mul1_x, mul2_x, mul3_x, mul4_x, add2_y, name) + return result + +@ops.RegisterGradient("DynamicGruV2") +def dynamic_gru_v2_grad(op, dy, doutput_h, dupdate, dreset, dnew, dhidden_new): + (x, weight_input, weight_hidden, bias_input, bias_hidden, seq_length, init_h) = op.inputs + (y, output_h, update, reset, new, hidden_new) = op.outputs + (dw_input, dw_hidden, db_input, db_hidden, dx, dh_prev) = gen_npu_ops.dynamic_gru_v2_grad(x, weight_input, weight_hidden, y, init_h, output_h, dy, doutput_h, update, reset, new, hidden_new, direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), gate_order=op.get_attr("gate_order"), reset_after=op.get_attr("reset_after")) + + return (dx, dw_input, dw_hidden, db_input, db_hidden, seq_length, dh_prev) + +@ops.RegisterGradient("DynamicRnn") +def dynamic_rnn_grad(op, dy, dh, dc, di, dj, df, do, dtanhc): + (x, w, b, seq_length, init_h, init_c) = op.inputs + (y, output_h, output_c, i, j, f, o, tanhc) = op.outputs + (dw, db, dx, dh_prev, dc_prev) = gen_npu_ops.dynamic_rnn_grad(x, w, b, y, init_h[-1], init_c[-1], output_h, output_c, dy, dh[-1], dc[-1], i, j, f, o, tanhc, cell_type=op.get_attr("cell_type"), direction=op.get_attr("direction"), cell_depth=op.get_attr("cell_depth"), use_peephole=op.get_attr("use_peephole"), keep_prob=op.get_attr("keep_prob"), cell_clip=op.get_attr("cell_clip"), num_proj=op.get_attr("num_proj"), time_major=op.get_attr("time_major"), forget_bias=op.get_attr("forget_bias")) + + return (dx, dw, db, seq_length, dh_prev, dc_prev) + +def lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x, + mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.lamb_apply_optimizer_assign() is not compatible with eager execution") + update,nextv,nextm=gen_npu_ops.lamb_apply_optimizer_assign(input0,input1,input2,input3,mul0_x,mul1_x,mul2_x, + mul3_x,add2_y,steps,do_use_weight,weight_decay_rate,name) + return update,nextv,nextm + +def lamb_apply_weight_assign(input0,input1,input2,input3,input4,name=None): + if context.executing_eagerly(): + raise RuntimeError("tf.lamb_apply_weight_assign() is not compatible with eager execution") + result = gen_npu_ops.lamb_apply_weight_assign(input0,input1,input2,input3,input4,name) + return result + +def dropout_v3(x, keep_prob, noise_shape=None, seed=None, name=None): + """ The gradient for gelu + + Args: + x: A tensor with type is float + keep_prob: A tensor, float, rate of every element reserved + noise_shape: A 1-D tensor, with type int32, shape of keep/drop what random generated. + seed: Random seed. + name: Layer name. + + Returns: + A tensor. + """ + x = ops.convert_to_tensor(x,name="x") + if not x.dtype.is_floating: + raise ValueError("x has to be a floating point tensor since it's going to be scaled. Got a %s tensor instead." 
% x.dtype) + + if isinstance(keep_prob,numbers.Real) and not 0 < keep_prob <=1: + raise ValueError("Keep_prob must be a scalar tensor or a float in the range (0,1], got %g" % keep_prob) + + if isinstance(keep_prob,float) and keep_prob==1: + return x + + seed, seed2 = get_seed(seed) + noise_shape = _get_noise_shape(x,noise_shape) + gen_out = gen_npu_ops.drop_out_gen_mask_v3(noise_shape,keep_prob,seed,seed2,name) + result = gen_npu_ops.drop_out_do_mask_v3(x, gen_out, keep_prob, name) + return result + +@ops.RegisterGradient("DropOutDoMaskV3") +def _DropOutDoMaskV3Grad(op,grad): + result = gen_npu_ops.drop_out_do_mask_v3(grad, op.inputs[1], op.inputs[2]) + return [result, None, None] \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/requirements.txt new file mode 100644 index 000000000..e69de29bb diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_full_1p.sh new file mode 100644 index 000000000..bb931cf8d --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_full_1p.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd`/../ + +#集合通信参数,不需要修改 +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="text_classification_with_transformer_ID2563_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练batch_size +batch_size=32 + +#维测参数,precision_mode需要模型审视修改 +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#参数校验,不需要修改 +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + 
mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID + else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID + fi + + + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 text_classification_with_transformer.py --data_dir=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +wait +FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $4}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*1000/'${FPS}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_accuracy" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}p_acc + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep ",loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> 
$cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p.sh new file mode 100644 index 000000000..42b3ff4e8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd`/../ + +#集合通信参数,不需要修改 +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="text_classification_with_transformer_ID2563_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练batch_size +batch_size=32 + +#维测参数,precision_mode需要模型审视修改 +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#参数校验,不需要修改 +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID + else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID + fi + + + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup 
python3 text_classification_with_transformer.py --data_dir=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 +echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +wait +FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $4}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*1000/'${FPS}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_accuracy" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}p_perf + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep ",loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh new file mode 100644 index 000000000..c979030a8 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_dynamic.sh @@ -0,0 +1,179 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd`/../ + +#集合通信参数,不需要修改 +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 
数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="text_classification_with_transformer_ID2563_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练batch_size +batch_size=32 + +#维测参数,precision_mode需要模型审视修改 +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + --over_dump if or not over detection, default is False + --data_dump_flag data dump flag, default is False + --data_dump_step data dump step, default is 10 + --profiling if or not profiling for performance debug, default is False + --data_path source data of training + -h/--help show help message + " + exit 1 +fi + +#参数校验,不需要修改 +for para in $* +do + if [[ $para == --precision_mode* ]];then + precision_mode=`echo ${para#*=}` + elif [[ $para == --over_dump* ]];then + over_dump=`echo ${para#*=}` + over_dump_path=${cur_path}/output/overflow_dump + mkdir -p ${over_dump_path} + elif [[ $para == --data_dump_flag* ]];then + data_dump_flag=`echo ${para#*=}` + data_dump_path=${cur_path}/output/data_dump + mkdir -p ${data_dump_path} + elif [[ $para == --data_dump_step* ]];then + data_dump_step=`echo ${para#*=}` + elif [[ $para == --profiling* ]];then + profiling=`echo ${para#*=}` + profiling_dump_path=${cur_path}/output/profiling + mkdir -p ${profiling_dump_path} + elif [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --use_mixlist* ]];then + use_mixlist=`echo ${para#*=}` + elif [[ $para == --mixlist_file* ]];then + mixlist_file=`echo ${para#*=}` + elif [[ $para == --fusion_off_flag* ]];then + fusion_off_flag=`echo ${para#*=}` + elif [[ $para == --fusion_off_file* ]];then + fusion_off_file=`echo ${para#*=}` + fi +done + +#校验是否传入data_path,不需要修改 +if [[ $data_path == "" ]];then + echo "[Error] para \"data_path\" must be confing" + exit 1 +fi + +#训练开始时间,不需要修改 +start_time=$(date +%s) + +#进入训练脚本目录,需要模型审视修改 +cd $cur_path/ + +for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); +do + #设置环境变量,不需要修改 + echo "Device ID: $ASCEND_DEVICE_ID" + export RANK_ID=$RANK_ID + + if [ -d $cur_path/test/output ];then + rm -rf $cur_path/test/output/* + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID + else + mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID + fi + + + + #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 + nohup python3 text_classification_with_transformer.py --data_dir=$data_path \ + --epochs=$train_epochs \ + --batch_size=$batch_size \ + --static=0 \ + --precision_mode=${precision_mode} \ + --over_dump=${over_dump} \ + --over_dump_path=${over_dump_path} \ + --data_dump_flag=${data_dump_flag} \ + --data_dump_step=${data_dump_step} \ + --data_dump_path=${data_dump_path} \ + --profiling=${profiling} \ + --use_mixlist=${use_mixlist} \ + --fusion_off_flag=${fusion_off_flag} \ + --mixlist_file=${mixlist_file} \ + --fusion_off_file=${fusion_off_file} \ + --profiling_dump_path=${profiling_dump_path} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & +done +wait + +#conda deactivate +#训练结束时间,不需要修改 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +#结果打印,不需要修改 
+echo "------------------ Final result ------------------" +#输出性能FPS,需要模型审视修改 +wait +FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $4}'` +TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*1000/'${FPS}'}'` +#打印,不需要修改 +echo "Final Performance images/sec : $FPS" + +#输出训练精度,需要模型审视修改 +train_accuracy=`grep " val_accuracy" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $NF}'` +#打印,不需要修改 +echo "Final Train Accuracy : ${train_accuracy}" + + +#精度看护结果汇总 +#训练用例信息,不需要修改 +BatchSize=${batch_size} +DeviceType=`uname -m` +CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}p_perf + +##获取性能数据,不需要修改 +#吞吐量 +ActualFPS=${FPS} +#单迭代训练时长 +TrainingTime=${TrainingTime} + +#从train_$ASCEND_DEVICE_ID.log提取Loss到train_${CaseName}_loss.txt中,需要根据模型审视 +grep ",loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt +#最后一个迭代loss值,不需要修改 +ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt` + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_static.sh b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_static.sh new file mode 100644 index 000000000..129098ad5 --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/test/train_performance_1p_static.sh @@ -0,0 +1,178 @@ +#!/bin/bash + +#当前路径,不需要修改 +cur_path=`pwd`/../ + +#集合通信参数,不需要修改 +export RANK_SIZE=1 +export JOB_ID=10087 +RANK_ID_START=$ASCEND_DEVICE_ID + +# 数据集路径,保持为空,不需要修改 +data_path="" + +#基础参数,需要模型审视修改 +#网络名称,同目录名称 +Network="text_classification_with_transformer_ID2563_for_TensorFlow2.X" +#训练epoch +train_epochs=2 +#训练batch_size +batch_size=32 + +#维测参数,precision_mode需要模型审视修改 +precision_mode="allow_mix_precision" +#维持参数,以下不需要修改 +over_dump=False +if [[ $over_dump == True ]];then + over_dump_path=$cur_path/test/overflow_dump +fi +data_dump_flag=False +data_dump_step="10" +profiling=False +use_mixlist=False +mixlist_file="ops_info.json" +fusion_off_flag=False +fusion_off_file="fusion_switch.cfg" + +# 帮助信息,不需要修改 +if [[ $1 == --help || $1 == -h ]];then + echo"usage:./train_performance_1p.sh " + echo " " + echo "parameter explain: + --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision) + 
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+#Argument parsing, no modification needed
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    fi
+done
+
+#Check that data_path was passed in, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+#Training start time, no modification needed
+start_time=$(date +%s)
+
+#Enter the training script directory; review and modify per model
+cd $cur_path/
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    #Set environment variables, no modification needed
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+    if [ -d $cur_path/test/output ];then
+        rm -rf $cur_path/test/output/*
+        mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+    else
+        mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID
+    fi
+
+    #Run the training script; the arguments below need no modification, others should be reviewed per model
+    #(note: unlike train_performance_1p.sh, --static is not passed here, so the
+    # Python script's default --static=1 applies and shapes stay static)
+    nohup python3 text_classification_with_transformer.py --data_dir=$data_path \
+        --epochs=$train_epochs \
+        --batch_size=$batch_size \
+        --precision_mode=${precision_mode} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --profiling=${profiling} \
+        --use_mixlist=${use_mixlist} \
+        --fusion_off_flag=${fusion_off_flag} \
+        --mixlist_file=${mixlist_file} \
+        --fusion_off_file=${fusion_off_file} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+#conda deactivate
+#Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+#Print results, no modification needed
+echo "------------------ Final result ------------------"
+#Output performance FPS; review and modify per model
+wait
+FPS=`grep imgs/s $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $4}'`
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*1000/'${FPS}'}'`
+#Print, no modification needed
+echo "Final Performance images/sec : $FPS"
+
+#Output training accuracy; review and modify per model
+train_accuracy=`grep " val_accuracy" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk 'END {print $NF}'`
+#Print, no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+
+
+#Accuracy-monitoring result summary
+#Training case information, no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}p_perf
+
+##Collect performance data, no modification needed
+#Throughput
+ActualFPS=${FPS}
+#Per-step training time (ms)
+TrainingTime=${TrainingTime}
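+# (Editor note: the loss extraction below assumes log lines of the form
+# "step:<n> ,loss: <value>" printed by the LossHistory callback in
+# text_classification_with_transformer.py, so awk field $3 is the numeric
+# loss value.)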
+
+#Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep ",loss:" $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk '{print $3}' >> $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#Loss of the last step, no modification needed
+ActualLoss=`awk 'END {print $1}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+#Print key information into ${CaseName}.log, no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}_static" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/text_classification_with_transformer.py b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/text_classification_with_transformer.py
new file mode 100644
index 000000000..e05d2cdb0
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/text_classification_with_transformer.py
@@ -0,0 +1,328 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Title: Text classification with Transformer
+Author: [Apoorv Nandan](https://twitter.com/NandanApoorv)
+Date created: 2020/05/10
+Last modified: 2020/05/10
+Description: Implement a Transformer block as a Keras layer and use it for text classification.
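+
+(NPU port note: relative to the original keras.io example, this version adds
+command-line flags for precision mode, overflow/data dump, and profiling on
+Ascend devices; see the argparse block below.)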
+
+"""
+"""
+## Setup
+"""
+import os
+import time
+import logging  # used by load_data() below for the legacy `nb_words` warning
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+import npu_device
+import npu_convert_dropout
+import argparse
+import ast
+# `_remove_long_seq` is a private Keras-preprocessing helper used by load_data()
+# below when a maxlen is given; this script always calls load_data() with
+# maxlen=None, so the None fallback is never exercised.
+try:
+    from keras_preprocessing.sequence import _remove_long_seq
+except ImportError:  # helper location may vary across Keras versions
+    _remove_long_seq = None
+#===============================NPU Migration=========================================
+parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument('--precision_mode', default="allow_mix_precision", type=str,
+                    help='precision mode (allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)')
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, default=False,
+                    help='if or not over detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, default=False,
+                    help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+                    help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, default=False,
+                    help='if or not profiling for performance debug, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, default=False,
+                    help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, default=False,
+                    help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--data_dir', default='./', help="""directory to data""")
+parser.add_argument('--batch_size', default=32, type=int, help="""batch size for 1p""")
+parser.add_argument('--epochs', default=2, type=int, help="""epochs""")
+parser.add_argument('--static', default=1, type=int, help="""static shape""")
+args = parser.parse_args()
+
+def npu_config():
+    if args.data_dump_flag:
+        npu_device.global_options().dump_config.enable_dump = True
+        npu_device.global_options().dump_config.dump_path = args.data_dump_path
+        npu_device.global_options().dump_config.dump_step = args.data_dump_step
+        npu_device.global_options().dump_config.dump_mode = "all"
+
+    if args.over_dump:
+        npu_device.global_options().dump_config.enable_dump_debug = True
+        npu_device.global_options().dump_config.dump_path = args.over_dump_path
+        npu_device.global_options().dump_config.dump_debug_mode = "all"
+
+    if args.profiling:
+        npu_device.global_options().profiling_config.enable_profiling = True
+        profiling_options = '{"output":"' + args.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        npu_device.global_options().profiling_config.profiling_options = profiling_options
+    npu_device.global_options().precision_mode = args.precision_mode
+    if args.use_mixlist and args.precision_mode == 'allow_mix_precision':
+        npu_device.global_options().modify_mixlist = "../configs/" + args.mixlist_file
+    if args.fusion_off_flag:
+        npu_device.global_options().fusion_switch_file = "../configs/" + args.fusion_off_file
+    npu_device.open().as_default()
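+# Editor note (a sketch, not part of the original sample): all
+# npu_device.global_options() fields have to be set before
+# npu_device.open().as_default() runs, which is why npu_config() is invoked
+# right below, before any model or tensor is created. The launch scripts in
+# test/ wire the flags through, e.g.:
+#
+#     python3 text_classification_with_transformer.py --data_dir=./ \
+#         --precision_mode=allow_mix_precision --use_mixlist=True \
+#         --mixlist_file=ops_info.json
+#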
+
+#===============================NPU Migration=========================================
+npu_config()
+data_dir = args.data_dir
+
+"""
+## Implement a Transformer block as a layer
+"""
+class TransformerBlock(layers.Layer):
+    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
+        super(TransformerBlock, self).__init__()
+        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
+        self.ffn = keras.Sequential(
+            [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),]
+        )
+        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
+        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
+        self.dropout1 = layers.Dropout(rate)
+        self.dropout2 = layers.Dropout(rate)
+
+    def call(self, inputs, training):
+        attn_output = self.att(inputs, inputs)
+        attn_output = self.dropout1(attn_output, training=training)
+        out1 = self.layernorm1(inputs + attn_output)
+        ffn_output = self.ffn(out1)
+        ffn_output = self.dropout2(ffn_output, training=training)
+        return self.layernorm2(out1 + ffn_output)
+
+
+"""
+## Implement embedding layer
+
+Two separate embedding layers: one for tokens, one for token index (positions).
+"""
+
+
+class TokenAndPositionEmbedding(layers.Layer):
+    def __init__(self, maxlen, vocab_size, embed_dim):
+        super(TokenAndPositionEmbedding, self).__init__()
+        self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
+        self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
+
+    def call(self, x):
+        maxlen = tf.shape(x)[-1]
+        positions = tf.range(start=0, limit=maxlen, delta=1)
+        positions = self.pos_emb(positions)
+        x = self.token_emb(x)
+        return x + positions
+
+class LossHistory(tf.keras.callbacks.Callback):
+    def __init__(self, batch_size, log_steps=50, initial_step=0):
+        self.batch_size = batch_size
+        super(LossHistory, self).__init__()
+        self.steps_before_epoch = initial_step
+        self.last_log_step = initial_step
+        self.log_steps = log_steps
+        self.steps_in_epoch = 0
+        self.start_time = None
+
+    @property
+    def global_steps(self):
+        """The current 1-indexed global step."""
+        return self.steps_before_epoch + self.steps_in_epoch
+
+    def on_batch_begin(self, batch, logs={}):
+        if not self.start_time:
+            self.start_time = time.time()
+
+    def on_batch_end(self, batch, logs={}):
+        loss = logs.get('loss')
+        self.steps_in_epoch = batch + 1
+        steps_since_last_log = self.global_steps - self.last_log_step
+        if steps_since_last_log >= self.log_steps:
+            now = time.time()
+            elapsed_time = now - self.start_time
+            steps_per_second = steps_since_last_log / elapsed_time
+            examples_per_second = steps_per_second * self.batch_size
+            # the launch scripts grep for "imgs/s" in this line to compute FPS
+            print(
+                'TimeHistory: %.2f seconds, %.2f imgs/s between steps %d '
+                'and %d'%(elapsed_time, examples_per_second, self.last_log_step,
+                self.global_steps), flush=True)
+            self.last_log_step = self.global_steps
+            self.start_time = None
+        # the launch scripts grep for ",loss:" in this line to collect the loss
+        print('step:%d ,loss: %f '%(batch, loss), flush=True)
+
+    def on_epoch_begin(self, epoch, logs={}):
+        if not self.start_time:
+            self.start_time = time.time()
+        self.epoch_start = time.time()
+
+    def on_epoch_end(self, epoch, logs={}):
+        epoch_run_time = time.time() - self.epoch_start
+        self.steps_before_epoch += self.steps_in_epoch
+        self.steps_in_epoch = 0
+
+logger = LossHistory(args.batch_size)
+
+"""
+## Download and prepare dataset
+"""
+
+vocab_size = 20000  # Only consider the top 20k words
+maxlen = 200  # Only consider the first 200 words of each movie review
+#(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
+def load_data(path='imdb.npz',
+              num_words=None,
+              skip_top=0,
+              maxlen=None,
+              seed=113,
+              start_char=1,
+              oov_char=2,
+              index_from=3,
+              static=1,
+              **kwargs):
+    # Legacy support
+    if 'nb_words' in kwargs:
+        logging.warning('The `nb_words` argument in `load_data` '
+                        'has been renamed `num_words`.')
+        num_words = kwargs.pop('nb_words')
+    if kwargs:
+        raise TypeError(f'Unrecognized keyword arguments: {str(kwargs)}.')
+
+    # origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
+    # path = get_file(
+    #     path,
+    #     origin=origin_folder + 'imdb.npz',
+    #     file_hash=
+    #     '69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f')
+    with np.load(path, allow_pickle=True) as f:  # pylint: disable=unexpected-keyword-arg
+        x_train, labels_train = f['x_train'], f['y_train']
+        x_test, labels_test = f['x_test'], f['y_test']
+
+    rng = np.random.RandomState(seed)
+    indices = np.arange(len(x_train))
+    rng.shuffle(indices)
+    x_train = x_train[indices]
+    labels_train = labels_train[indices]
+
+    indices = np.arange(len(x_test))
+    rng.shuffle(indices)
+    x_test = x_test[indices]
+    labels_test = labels_test[indices]
+
+    if start_char is not None:
+        x_train = [[start_char] + [w + index_from for w in x] for x in x_train]
+        x_test = [[start_char] + [w + index_from for w in x] for x in x_test]
+    elif index_from:
+        x_train = [[w + index_from for w in x] for x in x_train]
+        x_test = [[w + index_from for w in x] for x in x_test]
+
+    if maxlen:
+        x_train, labels_train = _remove_long_seq(maxlen, x_train, labels_train)
+        x_test, labels_test = _remove_long_seq(maxlen, x_test, labels_test)
+        if not x_train or not x_test:
+            raise ValueError('After filtering for sequences shorter than maxlen='
+                             f'{str(maxlen)}, no sequence was kept. Increase maxlen.')
+
+    xs = x_train + x_test
+    labels = np.concatenate([labels_train, labels_test])
+
+    if not num_words:
+        num_words = max(max(x) for x in xs)
+
+    # by convention, use 2 as OOV word
+    # reserve 'index_from' (=3 by default) characters:
+    # 0 (padding), 1 (start), 2 (OOV)
+    if oov_char is not None:
+        xs = [
+            [w if (skip_top <= w < num_words) else oov_char for w in x] for x in xs
+        ]
+    else:
+        xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
+
+    idx = len(x_train)
+    if static:
+        # 24992 = 781 * 32: truncate to a multiple of the default batch size so
+        # every step sees a full batch and tensor shapes stay static
+        x_train, y_train = np.array(xs[:24992], dtype='object'), labels[:24992]
+        x_test, y_test = np.array(xs[idx:idx+24992], dtype='object'), labels[idx:idx+24992]
+    else:
+        x_train, y_train = np.array(xs[:idx], dtype='object'), labels[:idx]
+        x_test, y_test = np.array(xs[idx:], dtype='object'), labels[idx:]
+    return (x_train, y_train), (x_test, y_test)
+
+data_path = os.path.join(data_dir, 'imdb.npz')
+(x_train, y_train), (x_val, y_val) = load_data(path=data_path, num_words=vocab_size, static=args.static)
+
+print(len(x_train), "Training sequences")
+print(len(x_val), "Validation sequences")
+x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=maxlen)
+x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)
+
+"""
+## Create classifier model using transformer layer
+
+The transformer layer outputs one vector for each time step of the input
+sequence. Here, we take the mean across all time steps and use a
+feed-forward network on top of it to classify the text.
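+
+(Editor note on shapes, following the defaults used below: with maxlen=200 and
+embed_dim=32 the embedding output is (batch, 200, 32); GlobalAveragePooling1D
+reduces it to (batch, 32) before the dropout/dense classification head.)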
+""" + + +embed_dim = 32 # Embedding size for each token +num_heads = 2 # Number of attention heads +ff_dim = 32 # Hidden layer size in feed forward network inside transformer + +inputs = layers.Input(shape=(maxlen,)) +embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim) +x = embedding_layer(inputs) +transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim) +x = transformer_block(x) +x = layers.GlobalAveragePooling1D()(x) +x = layers.Dropout(0.1)(x) +x = layers.Dense(20, activation="relu")(x) +x = layers.Dropout(0.1)(x) +outputs = layers.Dense(2, activation="softmax")(x) + +model = keras.Model(inputs=inputs, outputs=outputs) + + +""" +## Train and Evaluate +""" + +model.compile( + optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] +) +history = model.fit( + x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, validation_data=(x_val, y_val), verbose=2, callbacks=[logger] +) -- Gitee From 2d832f7eb0d7f94223353e09d5792d932f05de13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com> Date: Mon, 13 Jun 2022 07:22:22 +0000 Subject: [PATCH 51/54] =?UTF-8?q?word2vec=5FID2886=5Ffor=5FTensorFlow2.X?= =?UTF-8?q?=E7=A7=BB=E4=BB=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../word2vec_ID2886_for_TensorFlow2.X/LICENSE | 284 +++ .../README.md | 187 ++ .../configs/ops_info.json | 13 + .../images/tsne2.png | Bin 0 -> 337274 bytes .../modelzoo_level.txt | 0 .../requirements.txt | 4 + .../stop_words.txt | 1893 +++++++++++++++++ .../test/train_full_1p.sh | 173 ++ .../test/train_performance_1p.sh | 173 ++ .../word2vec_chinese.py | 362 ++++ 10 files changed, 3089 insertions(+) create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/LICENSE create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/README.md create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/configs/ops_info.json create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/images/tsne2.png create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/modelzoo_level.txt create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/requirements.txt create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/stop_words.txt create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_full_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_performance_1p.sh create mode 100644 TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/word2vec_chinese.py diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/LICENSE b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/LICENSE new file mode 100644 index 000000000..ab652360b --- /dev/null +++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/LICENSE @@ -0,0 +1,284 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ +Files: third_party/compute_library/... 
+ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------ +Files: ACKNOWLEDGEMENTS +LICENSE + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ +Files: third_party/hexagon + +Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of The Linux Foundation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/README.md b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/README.md
new file mode 100644
index 000000000..34b652846
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/README.md
@@ -0,0 +1,187 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Transfer Learning Guide](#迁移学习指导.md)
+- [Advanced Reference](#高级参考.md)
+
+<h2 id="基本信息.md">Basic Information</h2>
+
+**Publisher: Huawei**
+
+**Application Domain: Natural Language Processing**
+
+**Version: 1.1**
+
+**Modified: 2022.04.11**
+
+**Size: 64.2KB**
+
+**Framework: TensorFlow_2.6.2**
+
+**Model Format: h5**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: skip-gram-based word2vec embedding training code**
+
+<h2 id="概述.md">Overview</h2>
+
+## Summary
+    Skip-gram-based word2vec embedding training code.
+
+- Reference paper:
+
+    [Distributed representations of words and phrases and their compositionality](https://arxiv.org/abs/1310.4546)
+
+- Reference implementation:
+
+    [https://github.com/Deermini/word2vec-tensorflow](https://github.com/Deermini/word2vec-tensorflow)
+
+- Implementation adapted for Ascend AI processors:
+
+    [https://gitee.com/jelly_111/research_tf2/tree/master/word2vec_ID2886_for_TensorFlow2.X](https://gitee.com/jelly_111/research_tf2/tree/master/word2vec_ID2886_for_TensorFlow2.X)
+
+- To obtain the code at a specific commit_id via Git:
+    ```
+    git clone {repository_url}    # clone the repository
+    cd {repository_name}         # enter the model's code directory
+    git checkout {branch}        # switch to the corresponding branch
+    git reset --hard {commit_id} # reset the code to the given commit_id
+    cd {code_path}               # enter the model code path; skip if the repo only contains this model
+    ```
+
+## Default configuration
+- Training hyperparameters (single device):
+
+    Batch Size = 128
+
+    train steps = 3000000
+
+    learning rate = 0.001
+
+
+## Supported features
+
+| Feature | Supported |
+| ---------- | -------- |
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+## Mixed precision training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, it automatically lowers selected float32 operators in the graph to float16, improving performance and reducing memory usage with very little accuracy loss.
+
+## Enabling mixed precision
+Pass --precision_mode='allow_mix_precision' to the launch script:
+
+```
+./train_performance_1p.sh --help
+parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              if or not over detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              if or not profiling for performance debug, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+```
+
+Corresponding code (a finer-grained mixlist variant is sketched below):
+
+```
+    npu_device.global_options().precision_mode='allow_mix_precision'
+    npu_device.open().as_default()
+```
+
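+In addition to the global precision mode, individual operators can be pinned to
+float32 through a mixlist. A minimal sketch of how the other samples in this
+patch series wire it up (the `modify_mixlist` option and the
+`configs/ops_info.json` black-list shipped with this model are taken from those
+samples; whether word2vec_chinese.py exposes a matching `--use_mixlist` flag is
+an assumption):
+
+```
+npu_device.global_options().precision_mode = 'allow_mix_precision'
+# keep the operators black-listed in configs/ops_info.json out of float16
+npu_device.global_options().modify_mixlist = '../configs/ops_info.json'
+npu_device.open().as_default()
+```
+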
+<h2 id="训练环境准备.md">Training Environment Setup</h2>
+
+- For hardware and software environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies:
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+<h2 id="快速上手.md">Quick Start</h2>
+
+## Dataset preparation
+
+1. Download the cnews dataset yourself.
+
+## Model training
+- Click "Download Now" and choose a suitable download method to obtain the source package.
+- Start training.
+
+    1. Before launching training, configure the environment variables; see:
+
+       [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+    2. Single-device training
+
+       2.1 Single-device training command (the scripts live under word2vec_ID2886_for_TensorFlow2.X/test/); make sure to change "--data_path" in the example below to your dataset path (a full-run variant is sketched right after):
+
+        bash test/train_performance_1p.sh --data_path=/home/cnews
+
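+For a full accuracy run instead of the short performance run, the repository
+also ships test/train_full_1p.sh with the same interface (a sketch, assuming
+its flag set mirrors train_performance_1p.sh):
+
+```
+bash test/train_full_1p.sh --data_path=/home/cnews
+```
+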
+<h2 id="迁移学习指导.md">Transfer Learning Guide</h2>
+
+- Dataset preparation.
+
+    1. Obtain the data.
+       See "Dataset preparation" under "Quick Start".
+
+- Model training.
+
+    Follow the steps under "Model training".
+
+<h2 id="高级参考.md">Advanced Reference</h2>
+
+## Scripts and sample code
+```
+word2vec_ID2886_for_TensorFlow2.X
+|-- LICENSE
+|-- README.md
+|-- requirements.txt
+|-- stop_words.txt
+|-- modelzoo_level.txt
+|-- word2vec_chinese.py
+|-- test
+|   |-- train_full_1p.sh
+|   |-- train_performance_1p.sh
+
+```
+
+## Script parameters
+
+```
+--data_dir                      The location of the input data.
+--ckpt_path                     The location of the model checkpoint files
+--batch_size                    Batch size for training and evaluation
+--train_step                    train step num
+```
+
+## Training process
+
+1. Launch single-device training with the command given under "Model training".
+2. Set data_path in the training script (train_full_1p.sh) to the path of the training dataset; see "Model training" for the detailed flow.
+3. The model is stored under "curpath/output/ASCEND_DEVICE_ID", including the training log files.
+4. Taking multi-device training as an example, the loss information is written to the file curpath/output/{ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log.
+
+## Inference/validation process
+
+```
+ NA
+
+```
+
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/configs/ops_info.json b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/configs/ops_info.json
new file mode 100644
index 000000000..cad5f6889
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/configs/ops_info.json
@@ -0,0 +1,13 @@
+{
+    "black-list":{
+        "to-add":[
+            "SquaredDifference",
+            "AddN",
+            "Add",
+            "ConfusionSoftmaxGrad",
+            "ReduceSumD",
+            "SoftmaxCrossEntropyWithLogits",
+            "StridedSliceD"
+        ]
+    }
+}
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/images/tsne2.png b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/images/tsne2.png
new file mode 100644
index 0000000000000000000000000000000000000000..3de1aacf231338b2348281998c6fd5cd0c3c1177
GIT binary patch
literal 337274
[base85-encoded binary payload omitted: images/tsne2.png (337,274 bytes), presumably the t-SNE plot of the trained embeddings]
zE_o^o^7-3Epcw)UuYoy5y$p!n&728two@Zk<9*7sUaBp2m0GO`BNG@F`sw&-HB7HZ z$u%h%1CECwkcKVN=}`*-U?j`Q`0?$8=xpTDGU4htHt(_?<%0?o0`&3)!QWOW2io>-YHT#Cf|rV*Z6ilQMgYS zK+%Tv#RVjz2ze339HD~I_cJ4xF42;j@P|9RV_?*BZirYfIXW{h_B&Dc#;`J0)#}!i23U%!2ewHbfH9H#$WIL)ZM6FKR zt0bu!M`L2H_LlxfPHw2K&56Y#@4KL&4gb2r9a8{&8XL@u<>*@zdh9gecqp58$N`vI zkD-KSE$7J-d-v^9P0LzDzAyDs{<8QEuttfT75K)qSCqey+o*EFvy9x2-Ks{@z!Kt@ z-^QES(AYSgimE7UGR7$K&pyzKI31KIzTlSL`(ra|ZzEqJ#Xv%wn-Cbux4Z0@IS!-Y zAiq4tW^mZ8D1*buI`2_}0F4XJe|a3Vb7yb3Uw-j)(wnhCw3U-fedlH{>V&?*ss?w3 z>$*#}OxeEbcEsq7neP%#__k$VK@nry`N9?5Tp_J$*z#*NE5ih*aloT4rFsr$T;&cN zIKbuIlk{w@Q*(hg2=Y^yGv{fi+*}Tff4pm!jqfq4&xh1=H<`s>=qJ|p6sCUn%!>~y z*)|v+Te{y7<=v}Xe}1-yP~AyaT;Q(48x(ZnDh3JR6>pGe_eju*j=__*PD`q!Z7bHmaWRw980nSdiI4wRCIbsm3Yqg4<`eD+ zK=9g(L~-Yfe!+h#qX~e_3Y|J_VRI3!d@I^Wl@neV#g{Jo_B6&oU%zdK4y|7T|A}iI zXb_JwUWJ4t@jBGYy={HkZ0)gA;_W1D*HW$Qi)%$=LqnpjatS_}W=HE>+P8QC)^j$! zyPp&?;vqX)C;@5BA7?HJZ>un04T7A@9}>&Vjr|yKtigfF9~JRAc$~$(lp=r{-9U8~ zr;P~W0_4~p5_0PLn4bS>0pyWH15Jbj>*{R!<%PyEel>wxvz=0N3%(uq%-J+sFw$T= zlT)Vr{8l4qAEbM$k{^lKMA6eofN0Y~;PO6pZ4A0sz&0^#C5>ADXI=FEo9@|B#r=N? zNhmJ}$$jHS?~RVOq|cK8fEA+iIU2rA)dSd3yR)->1Ab^8U)|u?_3iAC$Nr6G)ob+b zL>tYokbLzQRqBF3bjaGJNJY>ql%Nn0S~dvMsAJRpYd3N=m83w)==NN}sj#b0(p!rx zglo&j6Osb-u#2E35pm-OXuW{8d!-f|2lZbNk~g#*w{A_r!PSWFDM2Y)QbF=W@%)gF z3+K3xEq&{Ip=H9li3O-m4icu7RA4#z>8n=ZRh{UMp7z8 zcm>Ak1K@^2iUO}C{GdEf;l^^=B&X$lIH^mwF1phP-Ner-1bDDjK7;4~T)<`%ZfC-f z357eCCt?H{(?kq&$-|Wu`HNg#uQTd$`D0a())hx0Fsx(li=x(Z%oga=QKKI9K8UfC z6j`ExuJho9sF7#Vrhd-L%UM~3!JoV~^0(hki7rRx8ei$Jx_ZFseP*uL9?RI2&pnjv zoOqy{%G=kkYq?|9(tfk;>$Cnc(+(YqcN?*-%j^3;T3YpYZqo)hFjri#_$e*Ss`>Pp zGeQ1=CZeWdIRfesoxbQhgtm!{nv?Uh?*MlJQHa=6Ua$@rJS5i?c-rsZ;|`zYASp6W zgg%xxn%XhyBpc13&bEhm%+(Pim-T=s5Qhqo`YAva4gw+F5I2EvG)_t>-G@d3kQgeX zl?W8ENt+ibpm0j1>|tMC=ZeeUx(IgSVZ90Q$kby*bs7u5O+iN?{_&Wr7miGJb?g^W zFZKFiqTTmnKhrL`wkuB4y3(+vrTIs;-zFI+plsB$BHS1()4Ym39|*g=nmo6C`}RR! 
zS}*S0rOPCm8*!BlV|MZb&kH}JKIK()JK6EUYxlN4^>=kxrni5zDo`6!qDUR*66GGM z&(93-#ZMX5CVNrvu3^cuLW&d=;0cl*c*x{@75pU!bO~NoHvm^k#1>dG2%&=e_d{u< zR}uIwPDYU`OHxMz)7Y3b?>a&cR6)=pbq}N~pZw^>`BkmRJCoh0H?Ct;>pCv0PnY_0 zx70Rm+(2AMgJKGz?hLmiB+t%e!A^p%+aa&|mfA1Umt^IcaEWvf%($$4<*Qe?JPU5z zcz4KfD@iC|YBIeID|#zbXx!A=SpdY+G|g~y*$>~uuj{ScVqNaMnOGtwQ#Z-G@m=>^ z(VF5LOa=6UT@r*eJu`w~z$CQ+1KPl8K|E-r8slkD+tG5b!tUrjVZ`NLp2rxODbWwXJ6dUu)|@TA9W>XTAU+MWyVF+Vc8eC-uKBdf;;!m3 zB8Aqjyt=k?Kc_LtkHuD6FLdu+6O~KaQCXB;u_-<;D8ze zC1!e&4}cm7k(@dUQka`Tn7H6TWUHeLL%xu?CE7+Du>TE~B4K?FPk>GTI+8RLCir_pQG(v9<)76zR z29M)n$mITWNu1-^5B~lQ4q5qx@q;ezH!mL!dRnkNW7*R#h!(caJVlL#Ls?1baY|Y1 zqTPMZlsl!1gCS2 zhG6LLB*TM}JQQOYZHTFc@-6Nl;UGJI=~5^WC+tUxerMg;0_afqJA+EcP9)!281(FC zTW6eJOuz)baB1`q(~_L|)1;#~5>be31z39l8xVyDharPj4&ip(stGE{%}Hr{ssh^1 zALPHoB;$#`TFxxPb&_$+wh_UMzFJq%FSo*|EwT$JpQA?;-QA5rS?-R8d!W=^uC1c7 zY04O(rAN$hsqFHA%p?6ujio0W>vJGiT&;vQrdSVzPK7!eWpnf#_Dh2GH_K0KB>Z!ICZx-9+wy1Kq!zkPFonr!V1 zZpt+yyXjX>(+)bN90I)&iKFy4lM<{AimH1I)A8(`-TP!-T~#n+od6vd%Jv=N{UG(c z**}(C2i}pxUMygq^l;DjHmG5DW2>UpCD2m}a0@C$LWo*vQ5sOniwB8}a$WiQ9rTOg zc5}{H{x-DUGw{&dFAnAFt9sGESicg_0_~~Av$GKg4t)2LAF4lt0f`k6i^OQ=7PKYL z2ns^Og_g{D<)(YZKjY@haxwf7^0zxGD)x)4 z_&+%41X`o~J0hovkWCt*X(n$^tJRKNRlWW}#>hn*Q-4O1p*F5Q4C#2Nw8ds$H-*?ZXp60v04OU3nRdX!nT#} z+p-B55|FE*yE?G*>3`#i+q#r29sx4O87C?*oB&DZPI8@vFCd_Hi0j8o)1u5<`Dq)3 zAaiduxzDRJd-m_oqbq**C@p$~txeV4Q|-QHXxFH!uw$uZUFXbf9AdM4freCWxDAd$ z-iyeHr0VS;2Yd3IF!l+7++k~Q)L$Wxtpn>;;1E=&9?yz!1hY@~B8g()DsgZ>Qe2@6bKeoRsk!F&o`yY2`IY6sLLF4So) zc>~d`S54|U?y&oOl~+_ApSUTQSmA&lSupGLZzJr!skd!8Ku|e1fb6*#PNt-YHX4Sj zo&y*=@pTe=fhhq(ytsZrh&8#RO!f>i#v)z-4BK0Tdun*gsU-?rFo5Jt3=KTk{G6Ou z=~a&iULu8R3p^s!avlt_A4ks@Ml0ctV&D_e2C$q$K!wb=`Jn4}C{9%WpYMmPgwYx)b zNC@G4mZ@9in?aZ|#gMQ2ibKMEm5(vtF9;%kfy^;dMA|Wm+pM{%nS^_THZ1L|w z|L9hZUfa_DPUL*wxY|F~H!*9`NQnDG5Jw0@kkCiOe3r%z*Y4fnYzd;Xc?UvO8>Bk} zGZx7T%F;~?*9RjGlq=Z%)*bQhoJ=I=ULSTVR z+cb4@96{lC7zCOmO1{pueS7tH!q-zr45Q1Wl$z_fZ`ZD_&oCr56p^n(R7BLpF^T_K zyA&V$%=(lzIvrH|Z~CN+ zIln$B&)H?r@fP*A#eq}$=a!Y#`rZ#3KZXEVHxtLbDATR0C5{%(oL>6*#&zW$??>7g z-YcAsAQ8uvCCcViT*Swv;YZQB3-5s6l@6}}BirlM_wV0d8$An6EH1b$*M8GwSkSFA zTb>^A9RcnahF$9fb0t8cz-8XwzJI4PJN8p-TDpMmv;mSTzIwrk!pj$w0?tEm@LStJnvc4g=2e2mxub3Q*Djzzt!4Odma;w z=8qCdI?OmeN6FylZDvhDXTKj|v(%|keZtcEz6#K?T6M!_^OEs{=Y98G_tk*x5oX4G z$`gO%9`OO9tp!LC2M`}ngi0`mJn#LBN;@H?RGYei1KWMrIRMAWPmck6(-R&AEiL&5 zSRgNVH);SP%+`^?E!Fg9xP`ZJyXj}i)n6&WbHSzvWH4d^eE$4$UM0=az>#mUbwgE{ z``q^?l@<(f*eKs7je#;1W(KJxl5CYkUSfjEi^F>gGoB8A?e;BYjdMGjDVKV+@(aH# zM_rwmY=fA?1j8rov=Bc6=k6m#m%vfZg+x(_g#yS;5=_~GiA&hwh7IfJM}y3|lMo68 z{!u(FBeI5enfAh7xD)FK+T(tM2fz1sdpmqVjx_i2^-dl5;Zd3@O#D$qgbDMGS`BPJ zK8OD*E=EI18F=}!Pr#%7Gv({X34Vr#oYy3ht=J-N1Z-yJbPC9PP<*c}-Egb7ZoL2W zj?u2g+jL)CeJT_d1J?h;`rFpk?>~X?2;yBxOs6T9a%2W)#tI|cs}@M?lygA`umSr| z^B3Iit;|M}K*k>nP$MqwBe^0Pv=?3J!C?!eSau6hi-0%xcrY2vptZPm zl9^7>w89J{{DV+X6t4S#JfgKYHvOo;Q*mY0e|-9btoY4NbM}7y`c?JPCYOZ^k(uc)tddu4ZjRs8ZM6RMmh`p? 
zVk|Nb$$n%i7^-cuqtGsryEMpW6EFlW6SCd^iT3&49mNqJ_P~XqpUgnWcYRRp6~yf; z0xU07D!vo?z|gx)7nt8=?E%c}Ds;?Nj z$2J%vkw4<`dS}L)?K zU(EaxEr=B6`{*yb=BX&n)QdTgyrrzu_&oc>rifVBR&c-JfD{tMeFo9jAo7oi>CM3{ zoZo_rqW&WppWLLg9!du(zFy?CCjZGy-tfR!Z|C{`_Jju>M^H~ScNgMb6|M{S(&Kf+ z_88RZBqSyh6Wrc*!hwEV4XB}!l4M=pd+gY!jxsI`&on0DzE-+?j-ilAM~Ov8b_Y+$ zjBgfsdcc%}#>gHqi7vU&B9&KEK52gC7dxk$0~Y2TsCm>`)cXIp-mZHXt;!d#AOa3n z*?i7AYNXx!k>y_;-&8j`NN-nP|7_^3koV+(sTpsaBBa{yt9U}gxY&w6KS}6IvzF6X zLC>xJP&ORBSZ#aPCXZt~OC-;{AKOh(jidGrPq}g>eluqpg(htLZ^mo^=J~c5LdB?Y zB_dWlp_XS)^#o}Vu6^#+sifI)&Dr*kxKinFc3v*5pm5nh^i=SHpf;j~N#gE`6`J;$ zvmWR+nEk8s{g(i+dcR)iagaxGg>}LM$6sKR;$o|W?OqIEC=0Evta>lgY3(z04s+dc z+f?_JSHLV#WdZm~gYw`O6Pg^3T_pZnH@9ljrN-~~1IX(aXB|o!+Nr6lV=WluIob&S zE_rJTx+~bL=6%i%YiBtGsU5#Tr03H_38!jI%k5=sGNR-XA){HIGyh!rK2L?n0--t; zqB1ln3<~z|55HlD#o7#xu4}Kc+cw4L!xG$c=gIv$R0X3Y+FB2G%Yeuy8ddZMA__y? z0!;lzFwFWF!2tPpW&(A&KjAhiO8r3`+9O9cO}mnw-lQ}YBQ(48!()SOwN~=W-NGc( z2nnsWej3wmHOK;4)cpZNDDIMuOgp-USQ}r0CJS{fJvykbBnAl))Gt0Z5Oye;8raNR zet-5&X5~w;7YNO-{PgLLZ}V4G>Nppoj5;RMFi_lsUG4mrfe7^UDH~yE8!}w3*NMu zH4U0Ong zZG;Az->y&~MCFhjx^2fL`YRyW{!kn!JAq4V+-C%#0|2V&j=V8r=xVC8$ZigS3@`Vp zloQt`fVKfy4SjTRc}{lUWo|<^PwB65R%#V?1>j9^>l_}BP|b?llE7+qbsN3~d3!H# zKd1_~=NEPPRrzFkpvMz7T99-yE-2LjFjN9SBq#DVQPTO`D?%TdlmF)45~KHP^!i&y zaksHcx-%lHk^;elaCETxgyHN(OLM_KbqnMJjO}$d2SiIWMNhbXz3H4C6(A1iEJVMn zpGa5aM{_$)0Cr3gd4_!5h#X*RG5S0{0e5U2wGMMQdfGuz*Syhr@201xOP6R(?D{tD zR2e)icot0wIH+2Te0^WMcJ?e@$$?DY7kX4sr=SIoK=j`VmvSuo&0`)({Cj|*94fhX1I+v1@* zxSb^2>lfT=m9W(RiJF+%BHz<@?#O{sTW+j;sj~RftwZK!g9ptXQ?hcuYoN=t6KN)Q zZcS~{nXd4o{FQ8{ACo%GP#9)hA*Cj@WmnNEO!=IIA=9bgU6> z#1QxC@bb~EP5|9N>L5IOglKSpaYv2}F9(istFUb$TEVM>{4W7)5viTQx)4?qY=$OA z>0NZEIIEq+{d>f^VgTrkvxpLY1W-bU%wVSMZLzU^P@wWx#VyOUHv_{gxedjm_T@M} z!OT}x7O_^RHvQfpIIql1UF3D*7H>DV*{|0#n3p^PO+#WzE^s&CM^g-!Bl0gkxU3uV ztWEpqZxuUKMRq)=^dt}x470o#ESKXD)6f|S@V~Nh#=Fl$XtP=dD;O)9o1pBV`U@L$ zzyAHrH=BUkO#cGbWVM~ExEmPZGPFOB=)RM>?u0IkjrjT7@-Ir9UK%63+SZbCp#P)C zp-1~BKkvNRe(!v#V^ePT@wAloYpd2}2E?ZLrI#+OhbU(-vhj#(#3>4jh0UX^>Qg%VLU^@N{G4Jw!XnYn zrDF$fL}Or}B3D1jOi|qFt+#WTqoZ;5rr+_KBEcQ3Eido0N5;+}T|lD7jbg{11knQ~ zY1CGj!g?W(6AmA69E%#k$&ntAODZ!ySfo2vYbEn!2kPq|;dc>qzl+YA`bQsF`QgKm zZ%esSk|(6YS{SeMgu}HtD6e2}js?+SFY!&g;)9hbG+L-VJNA$iQBHvH2Y9((pxkGr z&*UwRx zbS{qZ>#wIhcZN! 
zOcIp{E0VZc7`D^lr6}0I2qJg#Z^e$}>IAM3v#36Ze}~)c1IS(BaS5+8$b%2x{@bu$kx|)YOGZ=@Dl0UF3Z)21gsAM@&_Y&rl0r#^kRsVa zwh)DEp=3)L4eEJa`Q6WPJkR~defSym?elrRuW_E|bzX&$S^E}M`>3D4Rv0wQk!AGo z%1=(tZ4Xz<(=r67BEgO=(7bc|witABLjf0vmbGYy45QTe1w{x$hXo7;nb@uU`Sa)Z z-@hYWh9Sj>b4;@R(fS40H?*OQqm;cwB89DH0!XLKUh#!xv=d^@ ziyz$8fTCvNE$?OFa-ulO-pMZzib!OonciLG>7oar=fKAmNQ$$^liN>Jpk$tp)B*=N z8yI}ItCs~`>p56ql#e@^LdbBON_(PF^*Ya_5T<9EEU$;(wCDc2y*1!Yz8sEVK~_vl zf!tDaude$@YAL3}FH~3Ug!IgK*8&8CNt5=%4#BrR=*3+8gA`$dS3zKm_5fgiR2aN8+GyGn9@ayN6>N+-SYI@4dy7|MHnfi3b}8^3ahy5 z8-PoJxAr(DsoYuj3X*ck4|f&0i{Kd2%`&c>u@UFG9E>Xkuj74{Zq(wb%g z{xL;&dRyj&qJVw-R9NOX$$~8;> zEL7{~-@Jfv`}3R2qfq|$6eG_S(MwCG-&3<~;Go zI)FI-hK&7Eb};iFSE=OnJAF>;_ZCBrMECREo!7b_lG3T5;>)Vai=++s7V4*!ak#WH zKB36GQ;)G%%m0p=>#ER;>aoF>!qf@T6ip}oayqa%$f$BYG+SlxA@Gjvfr zi;+wugWEH*ut4qem!p5@C^g5D z>C8#z4r9DMx+$&F?i_wIL1CQ%cx%nix4Q`t8+y*7=SD z*vlaUHju;~=x!+@a}JE^#9bKz-bziDNsL1G42Fu(wG%s2IB88N*AR^HBY``R6=;~T zWEd6|&qZH zD%3JAJ%&6&>&4d>F%=z^Y}+t!J#}%pvTE6zupS9hgUSnA0Big@c+%ip!kgNWVMg6s zMYeyP-*?Xqr9Epmn8r2waWnOVWlYlz0b{x*Uk+>2uabYKvh6T?blQ{oNR^4{u%5_+ zuaGWyn4P-rFaER|M{kc?>da@a`#gJGwBFd>kIHv+8yk>TadIEkJR3ByKWA6CY=&X1 zb%p4R%b%>ZKRMvuwyk+g}p&+|hsGzDHLcfO-*_*zwI= zmXk|lRw3^$bht%h)hrgX|etr4W*H`vWAC>&6 z_B+44yy$hV+f9BP+CS!2gUOQ`1#n3V~5VBlvd^{_h8?*Vrg93 z+kDy?P|43O;*k<`}uGzL;Lp;#ROvH_g}SolnR7Lfv1?4$!}TLLYXuUHwG&H zH`GADoMKM@u$p>Ds+pcBsR%V!dGX?KLwFVd7zR_CIowi-%koLU4JbLT;P&HGCz~Aw znPJTo2NNwU4&;{`~gWxs1Sw`_k;44YaJ)AN|Rt-{ul_2q?w2R3(pbn4^+{qF(Ww(WDR zywEfBG{J?FvH>qKa64pfTPhmXVdjZOl(&w>wp++TG<0PY$cfLy<)x?H&BLliYnN@i z)4#N}PE*I0IG|IxhJKBsDMQV~gR)+>Y@j&LfM1D%xCwf z%$;v(kXU0eJ>8Id*G|MqAqxSB{?P3zpWd(%r!Fk+@s5<4_5Kf%Vv%JvG6pb20&l)7A=x4h&?Lqvt|va zDd*~Sp`;R^mXZ*GfF*5d6i6FhZf#iq+B8|!~B zmEx2GS5Bl*-CYMewc?KdF1$_?+7VbEd3BuK6#oHAlPh1;PS#wrIX2m&^2N%$Q{Cc> zr6;2>P?Cjo;D24u$BCkfS{9!pWJ@bRoW-dEQ8g(vxo^ywW3J3Lfji$ng)hX9LU}!XRxJ$suKAT$voxc4o2w=i^_S4aao>w5#@N!h3;$6YVw|n)B{()oP zEi!wi@0gtY*J;fn(^8`H%L^0oH2a0w-_g5&rudCKa%htB_wk#KD@~IL`e)DD1QS3P zveBB7qEo=Dn>Ulp+dn)}RCUd8=zv;%>=LKHoY=qpvoVP}v}S_UVstDvXO@=Bjm@?3 z`RB|wY2JJlA`IFWunbw*L4yf`G`LGRzIjh@njxzch1;7dihm*v)^;7A9b30rk~w+# zA~YgV+hpH?&=%U}Xo_GhErDVPueQ@b4nRLtiF&}8sp%TI-jn){7~-6>HF1hcX`AT) z{eT5iSvH440-SD(tpQCSijf8sGkGRm@#>)K^HQG9lj`qV(wVu692fSfNhbtin)2?F zx>LnbPhFn_9gKQjdtBbh;Z={)O6K!wV}l!7r}5DBEjvWWDv74en|JY88iUphtwUtF zX7!=F-!|oZ{9f0?XnwhMzj4t`_jeliezI3Z>m{Yrfw@|ugOV{@&^8`>h;8B<8lZ@Z zM(kS7K`l_^*3Dq(((+~QpX{9uJrpq*`RyJuDZYA*6*?PdrQO9YP4>YzZoFr43=lYN zeVxswpKV5r(B^#G+1Wv=K+feY{1!w^cguo-f&qwZa8mc-sLx!8(Cj0 zZh#(ZCH&r^$|q`9kihM~*q>+9Z@_?sG|gVKp>B8^c29aKFgW&ZMI%$4vv)3Nc^v%u z;e=uC!IaL2oMBiFUHLwqlMjsFlmtuHdffJM12wgT!?7TV2YD&)DwP?IxOimEf-KI1 zY+U#9=m37PfiG%EeY(5V!NO7Q;Spg2=AYIdF@JW#L&R(k9Ab5kbZnDu%!837qC$Z5 znH_EHDLXY>F578U&-nSI>R|Q)<30sdPM0!2=LYS)dneA~TZVPaQZ`&k`T^lqE(p@f z#Su(F5Pd#d%f55k>P6T-UCKr!*a}${$TUOK7A=NCwkb#h2B3#vh%V{Clm2QM2tCiAV2h?TuOcF{1X(#e=$12y1ujsD#o*_wf>j&@(E(4gi|H&CCdz`cC9* z@LE{{1BSr9pn5P|x<3cGWh@)7;;i7~<0B?Ii~%NOxOIGF|7ao}T zcX95I{tt$`FG)4_ST{3%?4#=93z=+k!kff9prw-Mh>6Zgqs5Gu*`1zLeoSvLQLKYu zg>qm<+Je$y0SD`L3=UaaKPEQ7rN9(@co>4Sef!E=?L*igd+03{oYk+dF4#i=6ArHK zTwuU$Q0o{heub+{K;z()X7CbRK4D;V4FSHeT%j>+0-8dFGZYXc_)q>i+8my_%qmLP zPF@olOW@ypcEO+o5$FqaLGQ;IA^|Ea@vipzBI7Oq=i1RLqjv2Y&TgX`W+()30XTL1 zdxur~(G8&Ic8+`5Pc^YepFYlh=c9M-gr95$TyqoPT)$m-WF%PsrsMnEzcM_8mhu=Z zq1PA0oo|UAE_2_zMV7Qw*I!HTpYV09d2-qB?&ohiG_w1hbEkj~7^Ola{#wb4Cl3GVVLDzy^)J!KobESCQ9BLT~u#4~v_SR)q>a3{k9YHDh%B@W-W z@DqP+rAzV!O90XE z>Elzc{O{1-?AU2Z+0M02M2n%|cyDB6*#5r<7XNB-a7NcHo>S~*?LT^aZpEM2*A&r7 z!~IC&*hVx%4Q`t#iz=B07s?keH{IRw<&`WMRb?MqKx1(M0MT15ipLv`uE=sz;q1AU@|JToc-0fyX!>Unxfno-HEui 
z$bzl=e^usg2evIPxY_Ij1#TbM5QcvoUF>XZ;&>o6JA~V!DqREQ?>#Z=+O>BPwHt*w zWgbS_iwo7Gou45=>Dtqm<7w!g)8<7Mj}%-y_y#Wz4{Ozk!jrCOAg_wCbX2ph8T z9RU+Vz%IiAB8)B+3*u3bTP=Ye^E1G_RF_V`IJ)H0mPaMm$+CY$YiJ&h&` zdX=(9Ib#Mhx50|F{#zEOKkpRy8Q(NCqUUN1 zqPJ|Zpgcr3JDuZm*!8rdiT&6{S41trmZqcMKlP|y=A7t!B2lyF1M9KH#^*z-?A@OT%6kG#vnEt) z%mPpG+K|y23TiMA>h5Vwx4B%G@FdvkeoQAD?Gs`K0HO3`vO_Ug_K((+y~Kby(3DE$Afj+TE`gOP(h8XSm z2Z;)!Xn+z^dWUF@aJUDb4Zcel-J>yZ<)22)W77EwkxyUk92V!Wnaz}Fyq(up<{D=w zLTKad0XWTH4$jC3jEd}8^zNOA0_h78Zft&}n$_`P?Uuc%m=ivK-;W9I8q*V*K~63m z!e6LP=n+EB`rw+v2vo51OtoNHQ>Lyy7%Og>&pCB!^6c3Y-~WV8zs3WxRh!(u@UcI1O0QnKCey>*IjT&_?bOdQuFH<@ z4~P*_c#4rGOjhG2O};;$2{I`d9V1U{or<_S71))^OV{D$X%rq}2}wv)4#u{QN`bm9 z)otU;zA=UGJVV2$EEWp6I0UIllL?>jd?5mkj`Q zoEt%Z5#}g@oeSoS?;iU10xCxBkJjDl&8;2Qdh-(d-xb{!!SOU?rIqCtvnD2h`2=4j zPmfD8WqS`m*oSZ8dUuqn1`6yh_-PqiH#qm5nN$$>6JsyOBhQ-getT_8+1D3Hp<0pp z0B5*8=Bb~_h=LC(j6UKTMW?lprGzAv#2|`8Q~)Cqp3%G%Pz+5Yv@p*j?%29MEm84P zbP;{0^S~U79gOPj(eLhdZU?QO6Felm&R_;|i0U(dJ(00W^rqJ6*{hd@y}g)v0LCu* z_1!PpCAkZ7E&3l821rXQ4*`ArR05!UEc{|Okbp{|G#FyKFjs}Co{_Yy>RPrOKO~;w zZtGFuTCZ>dcndIq^nwHZeyl-fqkxsO5cth0>+1#2GUTtpYli{Z!*% z@0&zF3SYMO!S4?Y>=1FV7T)al_4KJzJ)uP90D?7)D=5b{xOU?(cTg#SmpU}FU|83g zpjf2Y0_8VaLNp9`RJhC-vXH10HSK3qbhK%nL z&01MO<4CQ$U}zzgan{a8r8BtLp=|Dg@+N8{((}|tl;%x}L#jxZ%d|p06_p&{Wt?dy zwf4jB$u?r%nUHgYe6;pv&>#Hlk6O;iE@o| zkCOHN#|~u!?PpkD!?=gj@$u`+^@892`#+8gjGc7yALVpch<`yjT8}uQKS&_h$ z7IaJGvBpRrFW!r@VmfYiNGT6!1Z_5HgCZKn2%=4wqqt> zzYlgoLdFnD(!bds1?c8+ z%Ud?a$Ha`JzZnN(Q(gJ0+BP6%xkCrfinev`-0m^q#DOjC2QC{~eL06t$9eB!x_YVB zUVP=g1XY!H1vM81K2JunCX|BWjzRt>?wUU2m1}pX0TNR#ah&kFhKd!MKlx%5U73P- zGg813y_ikxa^8kUF=^3Z>@&Hz;dAVF^i;&hoHN#n{+WoJ(|%KHj=02bZ57ST3*)ya zFiCcmxn;PUGvkUdY2lHr=PUC{i%|SM^mQGjxRD|sh%aGP!I@hZ7mGq)2p~X)oTp%< zmng=MSCuud@)t`O7P?V6=kr2j8!iNntc^_0@1W(e)yuEl#}6Nl%D!ws?AxGsxMiGr z;Jn8ek592Im^}Gj8bt|9lsw}I?&yc7YBV46{}WW>^!$_GBbl)PK0@3S@d#}{boT&6 z#C!N}>~uq^^zAOB^ltV_wKCtzDDkwh(pm_0K!8z9eL%chV_uSA>aD<$mSNF;{rh|E z{ynRa{RTAnJCJ=uPziDe9HtLyqa0`HGxK7%MlDUV5U9^cb;yl8sRLMya-j`qIWk5? zbG$f(-JrGwMr3g92F)){(r!uLCZcBgR~-2?fGuFxZah$mun>oAe!(n><^&mpOOaI3 zp9B;@X>nek4p{ahZSvsxL^=hL9BaiDkzee?2%bk zuo7$Hf7rQ^$gHrrz?+9dcn8G=mOweW0pZx^61lFL{PJyfBRMm3b9Pyi#Y_4q_ zV@AQZ(hF?|qxqjs8sWWs2d&d63xu@AeQ+A#=fAbA=P%Z6$9YEIKDhPtQs$(TSgN_o zGEOH#VK17?lHai|$&dQ$?1x$J)3SAILncbASV$T964Rv~*BNj-RJUWn*u$QtqYZ}h z2uLO3>rnQ)Ix%nQ0(X(922}@;`DBm0*x{h<%IU?ukoEMADXidv>kA4oPauEx z?fW}Xb13{Tg|C5g+T5U^Ad%iR=p<8pQkYR0(JM6sE0RFSG1#J*jmy9`pI9`OE+Gfd z_fyNTN=}*L3aT7my;|Z?fOF3H_nn>M3IL}P>!&tqZ>rzZ#t@(y^A@GQN^G1h|B-7C zM7zQHW??Qjbtt#OO)vKw$_SpkL|#Ox)UlH(4F@hO+YBTn-cI@K*(6)zYN+!g{V19~ znX6Esxw}CYMsX96U~{L?8eo{h!ipJlepUTMVG4DvFnUBJ#8H-;jQaMSLBW9Ykw6!; zCC(11oGKB}@UZ58`cZ%6+-DcxuD*HB<7M;>)ft|Tk{4w?T|7!hy`ZpAqvG$3w9b$W zf{%GUI^0T{bxKzht`R@`C|l^Pd^dOhjQu@7RZRZnMWvQ|SMOX#t%iZM)*b%y%ZbxQ1t zNbo%(4w?rl93|-F%*OSDIY9`{PREZVj@DD{-ofC=-sk|czt_J~y;{LWV?-D8NPXOh zeRp%gAXOT)j95kFY|1_=Vy`VS7b^OWh@;^V#MuvJ>iDBmtfO932aNz3NmzdX?v{pZ zFfM`#-)6d1+S}RN?`MonE)$FZ+rP%~E{$6b6ZA=FYz7I>6<0~u&FP{zjO_9tn#ozhU#S>hAga>qJPfp=LLHJ$DtXEAi5trQph?CrP5Uv!Oeb=&*1 zlv}q;z`GT@bvr%2UQen+aP@DV{_j z6!mpXD3tIh~OP`tI~WyZC|dZ37{#^UWREWOK|ToAkH&$tNdi@K}$e zSW+wUj9=*a8>Ez)YMKLE-e@!(@#cE+xJ>A9x;7DQDW}{a$WwiGu@uOIDJqoAAP6v!ezqHNfMMT3UA;Kw{SsGV$DxpF8OFdG-V? 
z#ApP7Ohjy4htIDr#PU^Fu&RN&s#ZayucpbOVTU$NB*KDfa$#3<$OILfIq@u@7sEj- zDe9z|TaY{9?*Ayq!Y7;zAe&2grxpFC`bD0Z&p%dPRSVVULc$xyj=~1ZyfO^b?Z9jH zbDwD`H|kxoU}3-Wv#p})HYTzkTKHxm;HwxbQiV#X#2!BxDE+!p{b@|)&QoobH_p9~ zeb~%&uZ_#GdYUSh){#ptO#39Yj8;u8Q+NdpBP%M}QKiHKYXS~5zR_scvcEs`auk|L z%aDd~K1MLrl4V3NvperRZ^wDk?!C7*gdI9{CnMINSAfaFPhLG7NA6}N-Q?s0O-=cU zXnw%>M#d$s!;@t{5)SQ`VSApXXO0%A1C!I)c*kYSMJ_zNEb&F zbBP0(m^Ye{3K`C$yXbMgZ<{tdl`MYnG2I#9l?z@+BjlIyL?BF9pI&|XJc?Rdr+koQ zZ?g-9p}MXQ2lktnC|F$M7hUpYhF*& z)TcO@*C)!`5!W&pJ-}p6lLpNo{Jd%44%fXua}v7w91sma^0Twe9Cx%0+@wFIYPG@d zh3dE0*m=0en`}$T^oP>8y?039lVe8~BlU^w{O@631YBj~iwSZw>8(Um^JG-?v6nM; z7WAF6M+PZ5E2z{WlVk<{k{l@y^4o1fJwI?!U%=VUAk=d>$3NB zdV@{j?cclhuz&X=qJ|HTi1uAt2z{NZ?#fu-mBOK4G}kDs+E*@Tw1-U%>g;wD^UPFC z*}0d)NM3X`GHHz5xP`1h5oHT!MPQ!0{%2Ne70l4`uCovDVKPd9(yDb6=D0TPk@LBd z+4+j!*PKRV1bv$A_@>J%y0Gr4kC--TI%riu-kTj6UDxYd2Rcu4^=_W~d!^zUttq%a%Jx$Ws3vm#z9uf|N58?`+BM_ckuo%bILY% z1nXkS0Gzmn_Vqp<63MTdiVr^tq*mF<-_Oz%uW%7<@@8S1vf1p*FREjVc3e%4oGQGg z{nPPIMwUhRH}na>ltZ>#-b6#wF?gH`u%5A^YqMbx6bY1+6b7ZIi#~n&G;>6gMoc0n zI!0vKJmIv6=*K0lFLI6uD~h6a<_)+o?Xh>*00?q%er>CV^6H^JKo*tC^p%jrtD^9NM)Lqp1O)x(}li! z=XxDB8h3#If9o07nFMBuj&V^XkrRZM0zt71o}dgqY)8)yK`qyJsfT8!`1|+QOl&GI znxR!S)~llJ-Lu`YV>4&Xy7A-8uf5f>2wSV3wO&wADD&cfRric4!1 zn-+~YVHkAm;6SI1zx&TssCe*q>8;Op5p5zv`OyCBW_ChLqOfmO#qjyv%Lb}ygC?i@ zX?fdz52Nhb#MQE~>Iu|0c8pryUzcFFSj3fx6;l!($A`eE5Xu%%Z{sNhc%7si^W9^i02t znUP2uGeW4g1O|`|oNe3M2IcmMbL=*uNl$$bCND42ssXjX1=yn6l5K9>J-jDcf(b*e zWix=*N%TiXA1cssrlMJLq;mynBC8apVv$f#HXRBgC&@Z53QD}Wl z%c|_*L~Z(#@gd(5K~I5nDBGM0ddol>d;zLnkMXn9p5MH4=PL9qbB}I^3hQ2^uB0Y< zu`oq6s$dGie~SFbWHp4i*17xtJ(09S(^e03IcntxedL3bk)NTi`cqS6dX#Q%hVnge|<(sNJ}YGI=w z*b5{B9U(^lg#=U?Er%ffuj@zaufPE|paVUJwwcbKrBjw18EA^Y7mdyi{_*oCKYx^S z&`?o@Go>Eu-+)>-`Q^E4Ww;2EMLqV0QA3ym2X9MCg2ne3-c?(heQpQvr=pkcs;@u% z%r&B>STJ9`stja?r_`jZsZC&pSB|x=Nz!_$b0-8Hk{~wt)k=O@*?K8vB7GSUP?||j z>h`0IR~?yRVQv}a&rbDM%Ssy%>ucpR#S2v_0))W7KgaTZFT)_CysImdSv*jt`3>DO zYs=U@V`%h-62)n{F(3e$S3~lefanmBB_Anq@{bzH)Z2#*7U(q~;v=p4`1uNz&HrMURo&?~zM)W@P z`empnRIGZ`~Ivg6} z81d9)st*^(H?J?RrWTXso*!Vg`r-0ZZrgsUmz+f4FXbvRHjsuJ9!~kLEd0xWMx=CcALYO|A}(F<}GW@!$HBGF)fQ<}V{UHFGRZ)em|p;#MFg@iW8q37SV_h``u< zd9lW$_K<@p7o2WeC75M)&czN$E>PUj6kX>{oMI>*c;+fl7#Ll|lPZzPzh2y=)fyh6 zhHLDZ0O(Yoc)WIOo>=fwmV=`hW?K^fF1~!3aZziXcydvOHl6s1nT;gO)JzKOc+ zx-%@H$-y9$a4(qP*n0PKPtJ35|GPuJS?W!C8GGwmkL`|EAFiD+|0tBlP!pKVjj*&3 zSu0sW7&UNT4qEJ?Xb(qh;E$V+pY- z=z^L;9z$hL@-(}uyZTXX?q#|hKy6op>pUTt|9Iq&9**913bsw#Y%{;NSI$b#waj}l zb;USw;@yl$ggQRHzTy}p&zIf@3g|U205^N2nRawCyyqfbI@pPZ=ojS}`ktA%OLnc5D&hf~MOJj7v315i zs80EWg`rt%!kii=m(N`UBny-#f+kt2Xlz{x95H;iNd(|$i>FrekxfUDXxOe5Ob}z1 zpdfye=^LIAj^*=cS1>YQzP$*d&X_06pDTz9A>@{5%stgPUyBfM6SO7Qx1Bgk16jp;QQbFYxL*5%}j=g<40 z)elJ-qcpZj+Hm+0d$s6TlL zt@_QI%p8%oU`8^=gD6S2-+5M&!s3b?0T42dvPJ0n7O-!EPVTPeMawP^;>CB?NPYin zw)<$(p|&QbrkVhl3aq#q#%mj9XZM!W1YNU=gp!5|OUrD)0-An?d%Tnf+wEk68IS%h z_~LLdU6O^46s!ZCUF{pOo!hr=eekD`@uo4kR!*g*%P0?o$x^|L`<1uv{V zl_Agrpk6VyRLgBSyA^>rAhYWb3I5s^A;HE&;vHeyGf`QxIQqJQ$2A*Yi2Puk0Rj22_U3K@^k4Ty3 zp)(SjXM{wwQ#RFi>Z-L2j*M)vapOjeZfy^x6mVKZQ%>$ zG=s3Zk(5_+_D22Rv-c9g>X>)6oQ$Cu8lioX0j=>U687x2@nNb-ys}eLQWo9KO}rU> zICaL&X@;XRT~(;I53tvaS`{EtbO)l5FrPAI?~hlyEU+d2M>qUb)HC)m5#*xR{K4q;1^I_EVUpUHWAXAq95agB7bcw?Fq8IXjkF z3X=)Zf_nyo9-~dUgFsRIj}sRnnk|FjXByGEYUV*Nr#cPW-T--YAcJ2wT~?NAIqfKv zbv;X)|2?owCqDNjBEtLy3(ycYV(bS3YdFf=xFS!b6~MD6w|Qu9GleyheGeri9SJKq zmY}j2j~-jW+1Z%@qA2?ykW~#wOm+Ovu{Qr7a&)V`AH^hH*M6X56hI}E3KvdXAL|HU zx}F6ol=9xmhejXlxO%gIpKOKr;G5gB|ANjUD}Xxo@%P8z`;}#_wLM%Kc8|=WNV1A^ zm`peQ_cIcll9&G97;MJoU{O?^XGVsGqT8(zKaS)I2LCzs?(kuKu;Eqc;Ze(Az}W^B 
zIKG0m+|Bgm(LC)=fc*pyyG=!O0Pn~edTHlrRqJJb`qu+4cL>2zU=1JC_Q^0%X)%0s z*-#z2^?qI+8@T&Oe=ll?Olx6ZL{D-rc&@ai4!+ zNx@oY=SYJ_nghl0-deNt$&A?V1c#X;c67H#m@gzT*G|krW$y^!a>PxbmoX-fE?o+# zuDnoPYY%;t?&Je@40g~LT&3>G9DZ+5K!q_~sO-o?gy;0hADPjNRweMLA`7+Q=T+@K zw~FZ-v5ow1M+&%|#!R!T-B)M$mUfZUObtN9Vixv2lsMzu|4dIAzt+z+KL=OAZ-JV> zMO=lC6K6I7l$V8@O@sN1vb%swmPIJS=y2)rBjR5{XUli*kM@VF`{vs>d(^08U;Y0* zW5-@qmQ6{sRk7!d8igJF{HkomQ(#Foy|JcQZd-` zt@QMfBmJ_+FT1N>_ijMOtIFovwupct zRJvaCN_VkIwPm2`iq)Wsa&}8&?X3HBde%jevbvr=X5kS@3>8mQ_KRR9W2y38;f4Q# z*O|(0zJ(e*na&&h@%#sSrfk=I;#kpHw0WY|(CO#~w~G*#Du&|T5GYl&I5`#P&JE<1 zhhDf)ZM3r}yd>4^_2uNfp|8TP+h(3W#jd&4KL$KFxv*A>Zhrs#q}xBoWB5=@3bn)c z1|YrfXrT$)0ts1swZ$a$C#C`4+_pA1*lV6N`0Gd8>>kTn{iwcqbCZ?V(>Qw^bNPu` zJ6(iu2FJSAqknVuWCE9Z)*epQZo9ra5+R06#k7RoTU)ng-D)%UoCUo#%1$I=8sH7| z1PxBeTbB`TqPF9@^cXaS;+R2ezsLW5u^jS`hf;*3p<9df3{(m+*J?a^@XBDF#`S71 zkK9V1Qp)MH%VKL3Y)IXE^;dul^nDx%1r5J$qhY5YgS1Sw#DV%|&&b)d)-h=sQXg6D z;F$h^*3pL#(Aj-Hu~XDY+}Iq5_mW*-Rj<=WF=;uK?F-hA7=~R7rEnk95uD}OslDU) zNOmT=dHboTZbponJ?k!Q1x$KG%jnmbc%!UQpgejTTH+>uLO(|Y;H%6RpZfo#Eqh4*L zko?fW_14H!_wj;*HmFf)A6Up>0uBsu%JzfHnH&IugHgBMj zQJP9u&E+HYj~zX#xJYA7=rb8i;jZkFN!K2|0kwZi{mP1pV+@evMkZ4QGG@XF z6^X&qAxWci;9dyd6^Hk^1-6Z;O7S_+)of^Aw@b=b_S@uST5t6^FqDpnUumnX+4QX8 zM0#Cm(D_2B)DgnnF;CdIZrxtO7RUhlr&ILvOd-P}QDq+0LoZUf6>n{8Yt~F z%=7jMS0_1-Yw9&1CH_ucV$h-7l3~w$+wHls-B*rrJ-7dRj#o)9h?Y&np=1g%w$Piy zI2*s10Ul}nOBMWzZ;vZT8mz16D#3dixCBwC+ftKns8~0s5hilx=xAk&%B8k4sS3iI z=s|)8!AvM*N`Osr7mG~tT11_E>1TIv$+6?dJF|0xGy9!dpv|m23_)O76AGE*Y232q zxB?%z!~#a?QLQYjt_nb*Up71^K~+TsTQu)2DmOC!pmwa2C5Q@30C?E4e2Z!Xr-m7H z12S_4ZUhQ#NxP)>^xdS^Ip4O;`JBwk9L;k>YUw6c5GaT3i%R5nfe6Xy1aO9~>#CQm92T->CWnc0OaCmzvsx&{1v*zv7o!=UOa zn5nVe7K|L|@a%?m31=ym0{~Xl=pq;Q)P=d}-P^S3YspxEh=c&s`}mwHW~A%;BZ}8l zXT=3$$EtjN+1tde?bURv7T0H{oL?0mAAe~vV!8R4T{86I<#4lT*4uh6K9gRC9BvbZ zC5i?u4J9W2+Zr%@`Y-C6T0QW(*6mzan-(?K-g?NtYrh%C75$ExAxEAT%%9Vo6VHYR z!+ITMLs-pD$Hi&o#~+{fU%Ex*%4!>1TNYESW^*w|Wf;{KU!&AF z9rL|0yL?t5c;Dt73#KOHTM)`o7%)@Vqpk|4Y?P(g=qzZCMfgWCkK>>@A&$3azxZpl z-Kp(b3GaRT-L93->D=m5&*T!%H5PQ%Y_Z~~L`QdS5tRlr6kA=MtHR%3;*m_(T827O zwwj5E3|55(H=c+NwpFvnaf#A5)wOx*R`6({iF2OU(VE5XK9^Qws##=Hi;AV6EF$-MARR3O_ z`)+1xgR#ASJd1;e5gCr75>+emEv(gOjd;UrM7)gU9^aaSzbVFPmOu71z-hXU> z5dh;t2n_Q$Q1Q<|F$8_n)viEG%3ebiWW+Zwe1enX~6NTMN~vYi`Lr z?4U!9ZN-8ThStQA*}_6mpUm3X?*kgD?(aDu=yl~dMLlhdeL4p@ zGmnXz9v;APa5t!q7Vr{3w2o*IKJv@0IS&qgzSh-$U)-ZV&zjE)yw~vFm>r*-HYq|E zh5!`cyxFsL>(YS-eoXIPi&qXWM&Dvo0%I}f6lXSYG@t|2Z#{|x%s-+9=;x&xO?URPzZ z@Aqq3Nu-_M>ynTIU(Z(hjCNtI^5xaNh-wELC?0V;e+yja38xydtmIHcp9*_+wbL6@ z^7oOyVMFI7xBDGif6`@1;rWQEu^-RHbn%O|3Q$Q)I^RFy0Y+MF0)I#OFZqPN9FymlcLYp?W`0Yk%bH>%>vht?Yk&%%hbvwQv?DzNg zdoKwNU^R*J)N?TQsV7?M+}^L=~v1k*R?q)fW7-!!kq z7Lu26JRBDbJo-rA<{0IXVwr=qA>dNN8RWdwFN-9oVMwt4NH`R>#=$UhuzhZk^;x!ZsLhBd$^KNLLwd#!D3;%RJm) z@uZV@iaw5Oyjoy9eo!Ix!1$|fziX$VaaF7p8RcBm>{ZGnvkyg7`qaZ8o}V^SASR;L zZEa_l)pXM*?mqyk45xErd-)8y)hyI<)LtLXaXG14d_fuc{JU^hl=-HYMGg?IEH#dd z%yar}c5ca+=#XzZ7nxTSE`=7g5R23Y$rD=e##YSMtTLsFjRb?L11*huPjC`n#suas zG}6>7xo`!rItso6Srl?KL@UgG8sQIe5wd|{IXS)0v`2rJ`A3vi&6~({1QXcZ9nb$g zHlIhpu+ij2yBDj@7&)<@Rs2dlN9|bOcK-UGG=4U(DMQU2OzZv2?!iYE++*aPhMY+* z;GsHnm%PM#JQpr;(BGdepw1$GA85#K> zPI!GR2i+x@PC}tfb3o6~D}?^D5fZq7E@aRNtsERx6_AQGc%RwyhlX2WsBPk2?8R;r zmUMLQd*WmmH0oqxvaS1shBnF1mlxsWQ{>b_t^IU5X^;yy7dI|2NB;DK zW0SxU2^6A4@xp*`kLo~2y$E%6b*;!O@x2vXto9Nf6WX4Qz+6-p-O}6dbxWHb?|ZJyf<%B1+EWA5!v+2q#pjKEwYtV@l?c_xzY3=5|N*o6qLtwsq^)72FXxNq9(pOwho(<_E8V zj05%61qM1w%?DxOdZiTQpIEiFI&Ni_wdkE_j`*WLfBtOFT72On5k*D@HuQZt8E|Cy zu70DQW)z=;iGFZoAi&~^x8k@bE1O1~3UEV5+N5=B??t-}-TQ9Y@+$uRpUUXy9SNIv 
zEl|ii7!Z@EuAIH4pkNxkb7yxMwr8wZ?9s5&;w;CFj674owSDB7_y!Sv0oX#piue|(Oahwic9 z4x1A9aksYym&qXBo4=TbNG}0k=RSVY3CB^yJsM`=z8FVPTJ&&{*fC^;L1M~OmE8K< zw-Mg^Gzpo+C;{$r@S4-|8-QB5@Sw z@sSW(FwEjtBMLwgBgCL?yWiszjM%qVLUk%X3{Q34ag{zuEg783p?7ASLvO7?XtV8Y z*mX^a5NXJ|wq~ZLF|=$px+gR??0E0%Y0Orl)*~ABx?j37xNPfIrfOiZ6ARrhuj)mI ziRi%7_}qbc+TL*HbYD|x3$AKj095DIoqbRWAo7~WdxecwT>V^%Q;A0iVG^zs@#gXruOm{>{R5obR2B$Aa_W zaq?-QEgZv+IUn+%XaP=IK_8Yh=GeUaTe%}=Jn~K_5945VylFs#PYmDt_?)unmcYxF zL90h>0)3Zs=acW&o%{74p4zjBA7C{G3bigBIY$ibJI)|!q93Tg^);2S3cn+}M`}%5 zY8;j1`?%kZ+#~kwKA!Bp+hKpopFxb|B8;TyCD3H^HDshof#xUY4>LH|7$at=I35Lt zbcq0`xQ*OJK1EwPaReGB9z zD((MR=@YY_SaJ^zFo{SuXDm>ZEBt23@SG_2zfsHu9iGi2n4UUo>QpOqqhYg7cIpr@ zzL)Kr(vqY(M<#Dw-R;|$?h~#$punJC+G-zt^e1R(P{;XKGM`60KX-P>b>+lPIK3kL zU2zm;As3x9xXt(kmmaH+yA*S=ztv;w=Z);z_;|0aN)bnr&5U|)+OuJcLSoY;OvULBp>83`o5{{M@7`U*s|5S|ZWb06!YDGzhmPUa z)gk$G72+4gscQzxC&Uz9O6af^k#~f0+JxNWmO1bJ_MU-lwVcxyUFS?BWYIAU%q{hC z8=d4B3iq(hxB6B@dliS%|G|-yi|V*JU0i6Mv@wcfLoB#U(JCr%LN1KQu;hmda36(4 z{+^F};6nog)v}%to!u#Y>r|@L6!C`t1+Zb5>8D}ifVf=*TUuHQ@D3Cf=7@w~04^hk z!a9WrDuyJ?{qs|-CLYKhiT8ihe}1^lWQdGrKF^8Dy0vlT=q^KEbQv12oIlddv|0R| zTEjZ-g}(vsMV$3>%k{8z>oV!5Xk=R^Z$?O<1tUO0*zS8*b8(ld3w7UIn0*@wtIva| zahYyPQ}GK$>>3I+-+5Jk6BDt}4=u(C2F;ck$T=3@nt-m6%Cm@rR2{cQX1crK7^`NwLGb#80{q zDEN*T@NndJaw&_!OlzHK?)kUM51a^p8S_IAk1-_71+CwxQIEx;&+W3WCa3;3x=~#{ zb3(=J7lzwx2h$(XU5-&H6P*Y*#jvpn4J`MuS8?s$y?e8VYB{QsB=5XCcN&mIpfZxi z9D1bMt#k8+1C@67{XqSWSWtIKEL%Wu9GPqUK!-UPwv7~d_YkyadC`i0Rky-LkY5PI zYcz3?DWU;_m9r){a-f8B5v3fYJ`2JwF-f(Na(c8jqpgFOvZ&W`pV8SA`rU@W^xmQr)PA6thAo7g%?opP*ZkOiFrqF3NMMRJG?+;dH)lWw>p3TK*sLIH!} z%sPzsAa|zF`gHExxfq=kJ8eJ@?QSib<6(_n1~YJ*=Lm2wKX{kO<5o>p;QO5LVPiaa9D8L3tDd8Zjus zDd4398*m?6jXzSr6MY~ncM$N1KApc!9fWSj*!#f^&-7W|-g>+vQJiFZRMnK9f&V2* z1<8AWoZoMCOHnw}WOW8Yy2;L2h#6$nQ1|X54xTwUD!z_r&8gfJ{DQVHV!x=bQ7I^o-^&!8{Iz>Pr|&= zL~JpAE76fYEhk+GNoTZ-Aam&nLk#h#!5j-{15reSN#T$9(aCmxpqtD}F;nMN#Q>Jv z3x#tP9}i)`DK5212YLxl@ep?YwR#*Xt?!V2WS$wx) z!-o%!zt1EHidZ!Y*PJ`s;W!BFIU`&^-NORQTGdq@5Z!fj{KD0FQD;3f?gt^YGm9+k z*5KhCjpw~Oc69FB<7J+j7B7dq%D`Ov4>$1@8zOo&w8ot;U1fw5%*GI_J=x2{B_8`L zyz#atS;d@RXwW9@+soc7#fi#Om^}v2x z1M={UA{aUlHd~jJpx}F8O=92tEvPyStvo+B9hJ=z6)?3#j+`;$A+)=O1dXB2Gff-( z&|D2wrCT}mSLvtnNj5iZ_Z&Q?7kl#0E;r5d>ovQwwkq*^-(_DPpZ@T4`-Tl>by{zz z14Il6y>WP#sVLcH%j@44PGup^ufP4GtN}ne`{2ysxv#pXOm5HLgk(_D??pfpsT=s& zvd?eD*nsmTWd8!R7aX#F|T)`^btEf zY;1>Z`N9aB_nmh#+BU(U$)R)Bu|wQOZ+UX`V4l6s)SMk>r)z@72x%imESMv7{4z=- z#76Uoyo2ps47>QMw;#3@jVBe8knB|NvN@o2>svsdVWTIZoFUMOqGH{-Sn}yrv>8BH z*v%CW*6^tQwC!OQz9elKvtl^-Hl33}dBf-(JBCe}1f~g8*P~>jk1G?@_GI4%Ia$BI zJd+)f_!f!KfnvXgO6#Nv1~hh3z*UOcTNTID!(J8@+c@n|gnoMe;%r}ta(e$X#&i^b zQD-%Fl$G$itFUg(8iqa>ks7W{*08AWGosfI^V$yMK3;3T0@g;Rv8hB;)6&AP$bv4O zwD-N%L6s*)9?;j((bCab1D*_|f{}QHUZ7dU`%Xy}uDBUGCV>k1e7xZQQ#xAy4!WUk z8@e;~40Zg<++0mXB@=1F?drSBl~q9=wyv~b>oqMavslFQRCH(`OrCP|<2BZxY{@4Y zixE9|LZq*OB!mM#{}O_p0uOX5p{8cVvOEU5HrqF1Ak3MJaNMjK?|i6LZ*O&RAx5(t zlz#B6-~Y4#2TRk^)0OjSe61Z;jm&Mlci@lta&@-syX4gNA>ta&P`?7?hgHxHttO>K;7q!f% zAYqo&LjJ1N^YCLO*)GQ-j>){E9@~QZVfKTKNXdiq<+p6dpHpMPcLf)cKO~GJgkOV- z*d^Jy!hX`?(ZF)o8#+MH>}8bqA9fAdE}}eM5PI#4IQz(WthiqAV8Rc^Vr)7dF9%?~ zF7%$Aal$WYQ2WK@RkSkOo`xNLoOgKkL&N&3LhpMTivWo_5zdHIB9q+E>#}l@?@#(K zqAuP~U8x}DB1LCYt~_)o?Pxq*droETV&u`S*R(9WnNk!Kfh6MeN7ukX)rUfY4>%tZ z@}!({MeRfYLT``{wq&PRR$O$}p|J88?oOL>4IB}+h!&#rhG&l-r(#s)=KrO)*DO>9 z!FToYX|qL_9?S*o5SgXR31or-!1`KdDtuy52#MNlC6X_QkIc)7&tteqg0tH!_NW}!TpL0%Y8 zrr@tp$(11y7gUbCDRDQ)sftuTzNv+#Y<-~aSB25Vm1zCGPNwylJMYu@81Oq4KAU@m z7pC6B#JiQuQoUQd)A*p{YL|#Z@#R66l=mcCHG5w5YQS)Nk=M_1dFwx>ovpf>su4m< zKH(6E=R|Mad!afYY+DGfj5nK8s-9xQ4Z>9!{Cj#Nwee0oJQ$3-MZlcYZ)%l@R5g0a 
z&}nXN+OFvdZ{2%b&WzJ&yBfYF#ANlQBLG;;zBH7=K}Vs#!ty4D!k7fS~MHvY$VcN-NbVI5D<$o z;L$z2>)SMh-3khrJ9trE2Ck>LqV=`*9!s;|6~atRZE$ICLG`;LK=aqoh?~|3RP~wVckA}=kuvMGT}_Nd9e zlG2nm8-kcb?-ce(qxaXUh?pV9!te7Rz0G3L`jowRL3uKy$s0egp9ieKhG!P*#c?$)Y?!$ywW7gSw^ zBkG;^zvR&l8q|)?2wCqqzJ_Fq^v$RH)7LJve?NV4-(x>L3!%T{ncfl+EJ z1QSJo2?Bznb6|mtp!^EE^YJLS39JIM)BC`$8VyF+rkd`2kCKpTvdouq3=yLQY+N|R ziZp?74qn)J)?W>Rm?rrER}7f6{*gbI4K-4V(G|HL;SkgJ6 zb=e_P&!_9wjxu?5{(7=c^KBcjwnAQf6^)+A@BO7qcmk!cPy7h}|e(mPX`gB7A z7OwbsW3}$=Q(1z5l$30BNk`|ZcElRmga#=XoDZ3D_rIFe*u(rEzTP{Y>wo|Mr=9XH zZE0vpr4lV85h_HI29ac?<(;imA}tzXeIq%auUa#l4KkoP2x=UEMk6|kvZ!68}VG7zM=sBtRla$1SI8yhIPQ_HR`ulgL z^8bQ7Y3yAXI`jz-XOQ_d1Y*jNCVW3(&3ge3#m^Ga=&;{%SO>+hiq?e2c>I$$SXjEA znxH*$TP5CI$BA61(X6IPqd{P z>svZx1>CJHSs#`AUh&s;P2aKkLEAGQ&CmPrp?2HlN$tI=Vve2ZlB+mZW?q$>Vx01l z@MCw+I4uk=kU!R0{trr48+fx%S||L7I~l&fIs6FV7&bFlaA*3b?w}AB@+gsC!ArVc zo(01WB4&q4+n6>T%tKlz?GQBo(h%UK}pk z9Q;he^hLn}w+iKZQo+$DBbcsZPa@>Xn42?Uxdg~Wmdd|q?FBo-bRWAbn>M%y^AzB} z(X_z={5SXg%VGAGAm)=p6zvbv)qqc$zYoCMz7~6zYFM-?S&d|!$J-eP(CEvZEBCg& zKKTKou-L&NP(}0^v3(vERG)m7%@Zm+@ao!hD55RUu9(!yWIcXdP7`C5;uQ9 z;gnrw=mVqJjrp@JuXS2;&UW$6$y35(1A{VeN?i<}b$*uAwr)M6!fj5iyK_LS1fZ#f z=xJZfk3qoLSEidz-_(g9Q1JCwb5%M?vZSS)KZK?hn^P*iZu2%~xsUaW>DFgZ1~qlW z9h1EzLzy)Uw{H!YI67F${ZlZZ`=C{F}_NK*tSDdb+7?@!fcSl`a{r%UkV=sTpL@2%bh5Y3+ed3v5%7+f}KPR;ox*m5)f8Unf zfhQ>kCl^ab1ILQvPMnQod%+t(D1Q*#=n+waA;$O%=u-vy$6KwNSlIk=WA&YmNaU_( zJ`%EKKxT;xPypg_#bkge=RGEZkr<#Lmw5&i+K*0M-Agg(N7sWBm*&Gy74MO`c;R|; zR^z(`y%>pNt2DG3&5w-<2SRZbf417Uu)$vLewQagd1aW{?ubI5ZKw!NLr~dqg`* z0ZFvEm3C(Wv*#QpR~}Yrs^j>1vAs11F(!{#Z$TvMGOmL1LBe=hW`1~jBcq7ve^EZa zmc6$?F1dWVJQ+iLB4{cMd&mKD?f}yOS4A%-jTR18psU^y>X_hJvwIwlYG)TV<;bzs zZ_Wh8n=W^)2owRLcIqKeSd(>b2PNk z`IAe)@I&tfca_CE1`rf}a1n*RKx@7(4b8n?9 z?w9`y8?N~D=aXye>;fFvSBB{?U2JX!f(s^P77xUP7YcAyqKl>EghWQ}A>EV&kl~}k z6vMm{m)jXnySlP)O|{haR<)(yV!l4P-`vIKQQLWYv+uS+VVbpWp~EbjHPgn-H1J)j z1!O%wx$l?Op)gIuYfSgfJfJ(LKGm$t4rH@`AmMRF<&iCx4cFkHad!sd>yD3)=Wy(m zVzsu|dIhoC;QatO=4UMzTrv`ibw!d9&w7U$P83b19Uxv)LS`SW*QvH=VQ(uEP)~7Fw`dQS#%L@nZ1-e_%00)3tq@J3&Ez8-4$Ond3k@B!vi$*b%YW z3%Jg}T%ZyHYvVlAe-)xxL!uf6K0p0?W%pFlNi)Ir zMkF=87h?y*OU%jz5j&pHq>FV$eb0rd+IuArH>n(D>fGt&K1hr^U$`4 z>zdDem-YgxGU(EGUbOj8FORd`M`h2w3%s&*SyP(>N1akPrd_MqkKNHA3 zV&ks~iQBuW>YbCHfA&u87mI)0#?-uNzn3(^t1mk4l>nd?I9G=@r3fsDAljX*Nq-{p z7p`w-@-J@wOP8ON0KgNYyjDw-9Ss2tG|Sb+5+URg$e8UB#Q&<8-fGKKuU~LTXHi@- z!Z=#@w@wEi{)+8)K6Ty5lCMkIu4FJOC3@9M`qD{wA`q-WQ3CKt(u_~4cAtX-s$HLH zKjh94+O0Wln$a3Fn?d&R{m(yBT$>srY=Ef3a1xCqY6^fm3~&w9*c)YGGsYj3o+7P(@3CAVc;CLgql9RkFRtq7Y`uNkP34fo1*c3Z zxSiw%Uw?!WAR#)s_@aN`iT$OaHptXBjJt~v{odv?I){(k-(j+*)1Kht8%o#rI$7H^ za@lXAk@Ef~dcGm;b?;;a2BwTj@zH<%X>4|siQIMi@N?(RiNOohLiVkKs_Oh77H1dA zHE1ghc<30+bC-vcqV1Sa(PzlXEs0$PK0ELMPW0JYyXYpMUt@_Z^PCr#4Xa!VO49tX5f(oYzKaz+&<#3;m z6c^-i+rrb6M*B`*gEC803{r0N+v(*kSd~in5ROIBeJZFwl&B^&!&5AjFdR3sI{Wt+G{#7n(8$NzY=Jn=_ zPPu~uK4A+T^0t`iJ}<@1iQ9tzhR=8bvXs(6c20el(tCyI!9@3wa=7!?ABCeHScGMx z@IzvIVCw~*5LYq^*Oo2%koqiON_^|iSqGl^HvPCU;8)eQilOrU2*HCS2FzQ^S-n3V&=*)tyb$%W@WI@^Ws+v_EZ(jCTzoq<6Lcfx?%@-@`VniSd zUt?Enez=JL7F%-y0#=Obqd5oy2R&lhhfx=my)sVZq?`R5thHqvs1c#XtPgwD$Q3F3rXz{cf4y{uEuxtEetlXHE}pk#qp|Ld9t|)P0ND&OY8R zVoUUm4bLmS0caJ%j;p3S3fZ_UY>($tYo*}21ZT(GPuc#@4pe#M0L%s@j5tipAZ!kR z%E0OUI^~KU%JT07TRz9Obd{aRoD~>KUc9j!kYFMise<$I)6oC{@^~>gDat0dH8U>H zXsWqqbmr#;AIoc8ELZFI=mszfj&dJe6O}_)D4?47Z36zmuZb&ofu8qhtCR(hf8VQ$ z#*bZMVZjF)Hn!^3eGmzE={A+-H?nR5$%YI@wMHaH~L zZv+N2*V?Dq(!rw_YG)HtgRm4|f}tJP<8r+l-Ehc85w{@{d6QR!OjPjl(07|2_G^2u z^uCiu-HZ|Y!kOie_cK`k!$~A?TUNbCuhMC3Y)qi~38Gx+442($Bl7WaoWZ-yIBwd# zcke*RU`n-w-pO6s@`@UNr=7ljs9VIs940o=r6ai^nQku$g8f%to$fgOiHXUuMG^E& 
z&OXj_G}NSTYwMj+guoNK^#1?)Y>VYt-|r1QX&5(@S(D!UQItV0=z&cqNg4N{%k}$o z9en>#%4UnT%cJ@L4`AD7*W~~VbYklt);aw5br7GL`4@v$NpCtb_FlI|LqAu(OiHLv zN}DmEC?`box#f!1TVE|!Iqvy7CbH~xZ}HQ%sAul0x9AzBXmC^}FsuyHJ2TP~Ql~AwzrH&6_vtH^`41*^#(G z{!_S-?>2{pK%f#a=#h_2UFQ#IF92Ym4@sF*L%Vm%-1DC{S7iOuzyeqffu!Poh<%l$ z{YjuxSj|ceg7Yu!@M}VA@n}Is|KRddZp{t9&i6LcR{CejlGn?ZTmkO3Nb^+_0XlT2 z=r1opX|ch|fQcAd9nHd^zmuNu$#kXcN&9XfEl5_wQ;9Cm0Wq6j&MT2mFjYe%iK!Qy58+4~ z0VZ*OW^6qL2>c%f=t=E2X&dEufs|txe(>}2Jv(A!|EmQ!PgC-{V%pbMAE}~rEzK59 z1U(GBavh$Az(uiO%ezZ+_q>1QI{4|ipD~1; z+~s8vOLoB;N{|9dVEGLHBci&tj=W%T=7fbkt&2RTU0p~dOF6U$;T>J57N9Oj{-*+J z`8Jn_`!+bXebjbB#Y{z&6t!_*f7U!bDlHMQF(58(1OeA??S5}~hcY&6#QKb-Z+7Vx zbhwZgr!_{&T=D($yBPvEq)$c3Y~qr9er)93Ii ziG@P!_$q+gPnut7x) zJci|K4+6+x>`7+@Fh>{#R)F4*mYhB1u^`k!{fVpbmJWw48@C^yA0K>LT5gD!M+Q^y zm(kt}ZlAEcB7`ICUwOzC+x;yHeIB5>uu~AO;T<`@H7cRn_{<=qz(n_+O9qLICZRXZ zpU~g4V1#;W&kK>kq4ui>Mtq$3@5zZRsTXO*M)!I3@@3e))scf4jt2lU0hLR~tR%?&7vp0Mos_J1hw#Jqo(yR{-f>p5+Vyoq znr%PHZObE3oABh%efKF!$QAvU3%M~)$x%bU zAfyQ53=seuQ5yexd12Jt#`iayKVR%;6aW_SN)#<+IC5Gm1z)+9-ht8+>Q?=`N!&W)?qKT*NYCGANX;+3{vR8Sy)O_~r6(^=m zxw_V)s%r1g&%vinXBtL6pZ=yI(rx`e-#l_Ed?Rz6$_>$4^YXaGO4JizCYpK?7QKsY zTqV;K{v?Q2$)s4dWFCEUOnv*9HVeBincQbdV6aZw;p7|5b64pZuUDUYaP0U#lIfe1 z>|fu$T^z=m;F4k9oe8h|)m^N<-r6HRL3i$`ecq={tn*g5t~t=V zXWhAku!JcVm!@rQ@rCx=K?N_gXjp*u6M$Jia$o?J8i{boruJWr51A$UuDO}a0bRm6 zN?I*0uS;6a6ucP1VTB99SW5nbd)GdX2ff<^a9H~F>tlgHBZ384JB}I(^SdxS(=Ek0 z*jC+u89UhIXS=ke<);hD)F)N@^zUz}mo!TDKaN;X1+$Hlj(g)?s9+do&!>S zjZ=+6$5~ri-t|QdA~Kk$q#`%e`ntOGG!$e%5dwpZRn29UhS#p z`b#!-Q=awx)yrZA_N^U30f3zSSUCI`{Sd6hz3glup>Qw>_+B!PQH3cX22zd@PZ^9F zQKS%J)LxfXbFc}2uuZ}KM(fP2y9WXYFpZB2&=@z)#A*YPZt{QfPEGwQ!^h8E=17(9&1mn{@smT^mrT%eus6$p=(p*ga?(ndF$kOWvD?GYMd*vzH%?w=-sIye zkZsU0KY8hi^GF0nu6!dRJfOOaN%~96QN`PVMUCO;@D!IpT$#4=I2@P=ME2Ue>|ZwI z?v!9!5F9;;9fGFj=O1Q)H4hSB3JH=>gaNY&n3IVB%JFGTHsgv9m+_ zr|Inn4=TPEdi?aGLxMBPpj#2owgl$@JW$!n_#vrsh!SX+=U;bs=04~mH8~6iK~C}* z37!`xj!S#{6>=I8Q`fb@9*F!f`u7>Uu>~zR*FO3l^gTmCrSki*(AJ`j>MFwy7+k%abqe!|Z~?KV zx(s4%PK0`It{DTj2o{VRv76g+Kt;~&;MOgZl|y=J2JhM>k`+bdB1+;k5KkV^Feims z^xHh6cZu#~iHko^-8`s42ey?{`RVxoQ?F!x-NF()zCS%h%m$>_bJ zImL7$RZDDoqo9~QXr>LAT_lapLc9e+=#l5C^tCYYiP6zhonBR40jmrN4c!U?NZRR_ zFRR5`SisV}=g(&)nzPeWhw-bn>)K9%B)RWHYbD}z;f7EciM65=*S=Z&DxSGkNC*nr zp64TztfCFW)Op3{7#@XiUj2(ixZ&J2!Llbd3rizD&{P*NetQ)U)Xpc-`-*Wf=7}1GJ9ng-#Lg!5Up$$&-$ykln7%G=G(+Uj_B{k`OQmfPSIt8+>A<0)H0jQ#4% zZAQ&?ctJ+}E3h3Qc*5#N)PEUFB}C8rFJH2``ii)<+|OWUqa~jOk0-Rx=z#^X3aKNu z7y$_~nRW^MEl@eCp!s>5`rbzaNOx@@32X4CCG-HyAQu zLWn^663y;>I=AM`ue7t%enKpV&mgghy;r{79(L6 zQAADz^GTd`Ga|lOJYYhAiI!vhMhP}O&J>r2Pg_);9C%fUUK2Nn*w}%eokE#8=IUO} zYun8hoNg;We*T`as0ViH55rD6j>Xwy#|LEqGmKk8s93GhpWsu&saa6wt8Z89&yzho zeqbZ82yOt-d)fvj$RWI_6b{3~0pLz|A3ZX7m3#ZP1mK_c(Je24f2qz7I5I@HaqnIL z3vPwWJtuOS5uaL&0lfKQDX{sO#UV@qI_i-CZ&>*SstgTk>*8YLy}4hTXz4*(QGrVa zXoPfWCZ6&zZ$2;X?W`=LA8-1z8i`KY^UI4ZAkqlN$?6c6P}V$(-5_+#>JQeZ9qgx_ zv8y+7%FFf9#k$30sezG+4?tQ<$7JapMD_R5MAwwSI}xMnA@d6JKldvh9u8Hb3Nj3l zyNj%q{W3!C#ZjAo@x)p}UYfi)g;>g7Ap5**USA8vNL0`0q68Vpmp6)&#fQfZlwxWy zlCW8?mg3bQ_oQLMft^xm+df$ISLxZ)aCgMaXK!DB9Lz?rv!g`>>_k(Ub|t%gr!F!O zv4cAoaaKIkWJAL4t{-1r8N(ts;ntvEQbEv@xYlwHx+U~R!*1q0nls19Xcz|@mvalx ziI6jpv@z}j4rUPqi8#a-g%V8)(g9=MX08|#YLb_?(mIM$l$$SG(Nkm*tbYL9vIb^3};PVQwBP zH!fea!fpIG5faN&wa(L1fsNoGGiDdxPk&n=I=yMiVgZJPSU4%vL9E%+SE=asmY>X~ zXrT*d(qHcAIFqMe&9_Ne0)mg?8b(zWMm+$tabIZ*jDU2-&drJO`72CMKdt(uxpm5c zef#8}4&N9__eS^)_GMM-lJhV8If7nSPpk|!G19FHvVAqBzUs4$ z%rn61D71$a-Lr<6g~)SJ=_ZKj;ejO?*E(S zy|stk5nZ2%{yQWT)~m$--aIUff*ND07<;sAfLhiq8gIn3amk@o%?`G-Y89v$%LMk% z!%l~&yM8=_BFu^4ADQzx#=t3*LD-hq~Vt)g&$e56P(H=DvMJ&^WGT 
zP&>7vpTZ-|bdmPG=*_JOETnQd5EKcNdFPYV_2`Csild_4@$`=_PSVj& z0)4nsKVDmuf`IX2h?q%U&61KKV{LOR3LMBy#ZE+JFXjR`GCT8;E!SjNXjumbjyO>4}qh1kVw>$knzZc=vlhTFTk$X=S+A#rB>uXh_{wI@!Qve`Ml z{4|)DG87U$sl=KGHmHuV8_~rVc!ExfvlQ5S_LQO#IV6=pcqCg0H@Di$hb3bjGru51e=D% zNaEG97``i`cdT!7ZA`vt=VwJZme&<5TYoD@-hWV;8WopWoPBOh)dGnLQ>PxnNH4^N z2%r$2l;XY?v4cWhFRmw4HWl=EQWQBwM1KjLr7sCBxR`l>=jk~O=~Ol4&vNGOhM^VX zk3=;Bch-WnUH~SBt~f~^+_@3i5@_QcS*}Nu9SDVhR(7O^%|gXq!jwQ7G!YiDBa0wt zN;n2Z<{pQlt)1O4hqluFlqE3Sf@IC2`xUZgn#%Uc)`y6y*o%74+1Z&wgg7dL^Pb$x zu9zuonOk8Jl!pwFJlUx^YOM6>)>aLlDBeVu@Szh< zXqS$7%6modX?WTa?(=;F{o@_54zCoUJ77e+T#p3C)03E=UTEy3q@rS$F!=+WtRTvu znS2JZH~7|*SNmAlklAM$jOgkNwe6{Ab)f0!L zX_sb%K`@FOV!6q|yl95TNv@fXP(AO4V1}7FiBKikh7u}Z0@mRP5g8G%MibBX+&blN zw({MML!5jTA>B5~U#C6NPJ~2L8r!})x)KW_DHxqrxG0yqsv+|bPdjJ5SV#t1k7Cma zxRQNp^PNW7GECu|j%5Ot5K+)bhc^BEI)$SWHJ>uC41%~52?-BqGC3+f@Pv>@@`QC~ z5+o&Z9ArON$!UDa4;T6HIg1IN0tck{wGW&jxgvb~l)ni>lCS)h^P-ZWxc;1-ql44B zVehqWEjpt&zvK2EZ+BXGPjXvzLNg%n!?T(C%12`k%wIS}z;j}=%u**@Jctf>t9GX5 zpEEt)20EkQv&dFJ@JAjW1J`I9qUu)3eJXp}Je4LAwM-r$!_{f!ZNT}RzTJtQHg!x( zyTfKb)yrkPf48cb$ml8M8@_z;AyYeBcJpA`5>;TW%GS+(_&fd+#ds>|D**SOoG64R zY{C^w3u^Oc4K{e3EJEbJtkUT&HSAn=Ku$40CftRg0`*_Ek*|Uq2 za>f6Bm4ls$Y$?G!L0co2QlQi$INQQ78B4 z*H5_Yi9{StlP}`W_}7i5{HD))(%E0M8&6M-7<=mt!>w5TEKEv_2NPH_)J^MXo$m9@ zlrN zQo!51HqyIi1`tDUQr$-Q;dsj}okrDjeUUE0&lp*3&e&Ep;Gy}aJr1KUm$INK=CWMb zT@Doc#12k!o~b1^UY6Jn8gasw$)oLx#wQx~6-Y_2ZR zWtCw%fM&!PBGsM1O4z<`b2LJ`&F2%-L)DEhkQT+Zx9G#+lqiV1n z^aGbhYtEomLp9>x0j}}1m*KV0zuZ|2|ys zOuPHmH-C20=qT2p7!h%sL_K``{JHe~`xy+^(|=XBMQctBHh)SLWt&l)S*2$C=M-_N zFF-GK6Dwsa=RQ4SohqHOB{(m7gUF=W)q4ybA1KCq4i!PIf-wobz^J3jW2-wSJ{Y>A zrE);up1Pf`8kVuGx0oK#o|ZiO$fO`Xw}EjN-oULlZ~oS3bsHlEn0qZHMPwCHfk2wf zl8zpeYvS&fl$C|9Dqh9hl4HUJ$_>%8uFnkc0s~S^LecsUX+q+>^ zkyldsVmB*dne=?B<71c!;7>1U$QA8)+Y!Q zgGrIPJ8qFrN1g8Of1}t<-$T5KIHZV+!hg?_$<1knFIBc5KRyJ^A+yfgwy#~CRfoee ztF~@hu+iCI2_EWmN|DL8M-H#L6`(NRc)v>6=pPQw&V~DQS2dcZ`TDTb;xPhwqTFvY zvOm~G`7dwk%T?epVJ`;v{M z4&R~ap+~eAQ&p8m)w_WtuFIC_WXKHKDDj5ZSLw3ZSlJ8!5TdK&OJUe zEoaChd&OQ-BBv3sjg7A#5JSBO7r?+4Z9baW_%<$mcv2Ba4lr=dy>frDxevN`fv-AQ z)Iq(e5sOY1ptsk^ah*o&__I{4(}<}2PyjOzjzz+M8%Cz$=CSfp6BCm&Rw=^T{W&!%EUclRH0_$bL3b^4)&|wHq%OA+ zSnks8+Xqhu&l&xrXxS0nX3ZO~ZsiWr^p^Kf>@ao1cO_=F4#`Cms8VHABzWoHaSz0! 
zPFcVZH-Xmy*vjH#UCVQU{paM5TXN-0tn{E=Kepf15a}U=`4E^Fa-UU$K~Ygr^~DXq zHSBgcMo+bzN1ktV@zSLjDxIi4Sf4R9?IK!=96*W<2qD2cc6?SLz65CFFNZq~G(*EH z)JsAz>lC$d;qt+uoo3kn!LwL56PH7gOMff<|oJ5Voiz>`@x+mYcXK;1!ar0HvokC;}&$)iq zy4G^P{|?d}LI(ABp4gumXf7%&F;;jSiMG16DS}D&Q{mFb7f~+t8NW1`#*^V#oHss> z`h7J7;HP{1A?qcp(Wo!8wPi$;jDD;wGPHB{zpr@t>Q!xh<$TUM5ys0yq04qFl(vb< zVy71ynC@bwLOjU`HH)_}Rz+D&0A0u@#Kr@Pi&&S2T!$^LI*>D68s4-&9{Y#?;9|=E z>HKzlc-E?P;IP(4bvCQ!E|@!Ot%IJYYlr7{Yv*M>8#L|kAg|KF#inVa5iC03NmYMr z2FNAieWj}GcO4dFhtM!1$SUS|U^sRiYuiEO4lo9Vw%HN=SSr)v)9?2^K43@QkFq;1 zu3ST{n~rypQm4Hk{@c|;S4wxeYiDRKQ6q3-wn*n~nkyn#FlY#MBH9X;Nr-RB$hfJg zq$7C>XOmhfH7sHuGLj8Tq`fGGDliQ>AZQy9=#9H^+lf^|sM=^wfYY-SP?SgBb_lFMTxg4qxX zj-=0VsnTP=R*ye3GmqoQ4Srt#)dDQcd0Ka7%ZtnubqR*0IEEJW-CC|vK1FmlS8Q8f;VMyx+efZPtW`4hcKq8Zu-@vnPh9Y9|&6# z?|1wYX*u`sAuLj>X{xcSY=8%YZa-N6Bcp3Nw_jKjIl*jRp0!ZChBo9P9s_Q9$oV$) zVB+z>0%mX3j(hLOe5jPy^fQyCcr`LZ6&CLP<~- zIH80{7@#Tg{!u>)$38$r zdr6QHb1tVr!DJe(+YcXh;^30^>JU@?oqbPRaE=I<5p7x8jYUiOJ|Y(jprlW2*FgW1 zE9|xH2i>{5Yi0)qt-B&^O2{KoZ}1Bkro}4zQCTk`+bBn(!DBkaxz9Gg>6U8-Mn%Z?vH`^x0Z6nem7zU+p7!`a&j1R12hd(@q?@=F zu{84zY(~ZQ!JD*Mc?lC-5ToD4`6v?&9dQ%NYS)2N6csz+g9oMMm`+FX2PSdzZO=Yi zdl>%R{8b`d_#@pU!PbyuvE#?RV6_&WTQi-yt@TsM>un;xRd6ngJfbJ|q0xcHG5!&3 zUzq<|uc|)YKdkeCO`ZuJ`T;$nUXq?7fF{hya=J93UzJ1+R8Z^BY8Hzme6I4`|)5VI=^2?UHX={Od)OEs+X;)>bbS-!F zId``KOJy{}Z{YfW?w&SwQ#?PvEn5ZW-k-R=ma*pQY2H&L%%Xr22gS>PXL7pr4#FHW zVU%Nob@QHS@Mw+#kq!NHrM%OPey8i?o!`YScroQ-j+e4^cdcGD!?opIzFR{Mg*XKa zMaqjz#w5-#OvE}zKc4KXrR?nJxb5&^i*p+T{Qbq=8`b$gFW|)xtgM9dqhyv7kNiXK z&lm^EsjGX>YKjg#0#8MoSN$hZ%nHs&OAo+PgZQDS`B=~qPEkgtv3XiTpS5`L$KVAg zO}7~Boiyk5;9wP1Ilq(kBc>1RWUF(lL;a{bHM&g4Xu<^T2!#*SAWS@1<@`{av)YC~ zJ%0RnFHPk5woq6FkG1!pEHQOn+6z6t; zE79$myync;fiE9Mr+e5=rUheIryoG5g=X%L#3n0(UdORZ+Le4RNO2^+ln`O)G?bB~>X-ibn2{0nD zx(u3yr-?m27(=A0Xz#R2(utz9~*PIqt%b&+vL9`qo>y=z z&+1bk+iN#WaUC0O^Lfp^T9U9Rme-^cp`IF8= zkDz~c)(r4~2q)vbzTwIh*=lSOq6M{TkZFJX!c#ISd25nhy^Yv;^Tq{xQy1WYg0=g) zc)aq+b{qV^aJ;~2);iu*lh(dD#36m$e6&q-^#x!hjNsjdo!G3khzt__b0QV}XI+Ea zz<~ojR?7Q-Kb~^pgq>urc6Y5g|9N2^F$vK=2{SEt&RBl)b+b zxA|7pacT-)r_YMYny#Hww)S$+zYDASri9IlyfY)^>CcUB(}`k6*|Z1pK;+nR%n~Ib zmeV^Qh42@LFl~mE$2g8wI^29bVFX)PfA8gpVt0vuWTa$<8D9=I-F;!_phM@3r;UsF#aY z3Z8NcwfD*8Rg2K>d9yrO1Pp=BYi|4z3MSH{PoJ6WMDp~!J}DApg8eLWWgcI*f0XF< z(eumX{6~hP7avwmS@N}qb!+vSq>qkKPSScoYrpGHNxT}3wm<~q@WVt?NcSo>>e4@Q zDPw?*iHpP{W|#I!=eHa@G?MXZOrwG(>?IYL+#2zPvZAtcgYM&IZgkQW>xY5VrfRlzkQDIkjDZdZZ?mQB+Z1rGQG8>@fH(zrCZ#b1tI&Rhil9ac%!}TaG0dz>2Y>u-$mNAPs=fm zrY@8m7>%t{Lf@C`XJ<#{$0oGh*L$FG_MA0S8h&+<(Z6ll{ax_iaa)}vrfVA-$8knt zQ18q%$@%?2K%a;R4Co`&fa56YZT78={mU*o?Cjd6SD=>h=v{s#x0XHAd#yO4VR5^c zk7OU74ZiTvas2LED7Yfxgutrf+1va30F2;(UQb&eF3{zB`J+fa#Wo2_gar0|ISC8^OqDzJ$A5vMYD-uYHsaC7-a{vP5{#eR7ZCwR%d34OJ)OWhNVC+2;z0S-&PfUmK@PV~ zkDH3cY)H@QE>C=lT#|clm~~A!^O~xM(A^iFw~0av!9a5#)KZv9`9EDY=H%z+i})ci zlJYY@DotS*us{Zomh>b}3LQz&kKD~REOaNk?Bx6^8F?No$fb|=J2LH67}q65Fah_x zpzUF*2*FXA@XbYU%cd*GtzPZZe{gt(_nzkt^`pZwpHG&~{M?UP4Y@Q+O1aD38>aiZ zR&vU)=epAB0kBRF(Q(o^h_GV;G7`$rd!_WdGlvef+1#atlHs3{hCo}WUq%Ew#(5y{YBtaz1NdE-!gb)*WJ z#jtNK$yJp670=Gd($+I_wfUD6d2x|w;<-rWOO91eN5GQ89+`7BJc6pM-XV|t&)O#D zX-bhD{w1?*Z-7@Ih%T7R)nq7)#yCXe<^|Ia)`jdKR%3rRLL7DW$yJKXQ>H)H=2{1A zMD6@IH3hPSskh*0 zDHCJf%J&t=@F3pQlOD8m{|;7ehEkg_NHY6(2=584gHm+R77R3=%`Fm%6znd{c9+z5c8UwQ-$hl>D8r zOD+r8pXWP9yhKsZ}Jc4y+CseEy97xd^HWAol@tbY6$4JV;BBV2hMT;L&B=C#J8P6IEBCtr>A; z%P#(9j-rju9o`e5EgU(F&qM#W?jx=XX%XWl$-Y4M5iQ+sGwxOlkHv2z9=f#{ThH<% z%i&*e(ix9Je^23fiF|o^cS7eHA5YtZkL=MUrMY)bVzDh-5!xkhZWKIdZ$*#)%_{86 z(d%BDATL{hr2V|+GZ6rn>RqGJ*)M$Hbd!V*L;bQ}9J*Bcp!bj5K3lt4KT5R}8;V=P 
zplmPVI1zLgLjflM?mlHi1So4!z#eDHBa#5~@Co0)jF4;(vy_&IeVg?|y5kwE{*^VO)cjXRbm19mk!quv5*DZy-Q&brY{kh@(&6;j2cCM`@Qa&il{_lN)=~CGYhuPZ*V7 z_j0F?gN2rNwI53!rW}oyPt|Y#c;BYO^yh{V!RUabep`+Ni#Y0T+oxSg(SPvC2NaZ# zMB1WVu%js!-UxP)z-l4GZ1bP-R@1j>A74rt^^G{RxFh07Akt;jr}^H)KyO3Go<19% zu<$gieRY}7sg4z|uE2o^;}B&8MtD2^MOr)VexIyzCsN&*9zKLMeCHR%DLulU%{12Z z%3&v!ppZ6PujA%j=t94JO!dT_!d1|t#e#Yvxau+gWOdTm0Q-jfL7TLX{9_qO>+$@$ zg2lwbyu4r=kW*o2vwA2X9q>_XOo-%@0hv#VzD32k@QX8Q>k9OQG@jtJmaO%AY; zjsnU7v6zHuJo@a|#D%8~=FB03Y|*PJ2yuz%a>ctis;ZVB}EJMShGUe6*MkTKI_jf&75_H4P@V z+ie&E?7L%2@AqCiIohG>-Oy!g#$9c6%cvvUKiW$K|Rr?`0H0=0F}DzLF8VEZKmp3T?L0ijEB4bKqeMO+`d8E zdVsn)aBQCMdfzJ5JA^W%g*%1!r*^?}C5|zanJOT1SEo4)>BiGZnjSNFU|O0MEp!~Z zSQPA%d5)uU*n;wRovh`GhVC{M#BH&7a<0=CRFT}|ZG58lY<*l5^vnXj6Q`e|Ltua_r379_+bfJ?bpA* z8PC4b@dZmgQ{K1gS3)YPHv3!pq2aw=LU3nv-V=ZF5&=?tJvh1QQB`Z=|6 zxq{38q4RjL>T^i%c>h9=Y$L9NiS{I-dbz0oqCRRIs!_v3fMBvV^UE8q*#*`n0Y_nt zXipCu%hVb;X+cR_^SmByDd1wmrjTq({V5H6!%O(6IXzGg_u?gLA5HxNK|pDpjxFK^ z3&)~=uMOIsk*Z$WLB^(QEkq9bWlUDb0SCoiNp#zv-!#qC*PlR7M*UFm7@8g8rfOYyM>nrS z_o!R8$SsQbKnyz6V5pj~obPI{|Ad@^tNVF6Z-7e_$z4Jb0F*k^dvQB4S0n3eBIRJ| zbz^!kG$_wCjxAV=+)JKh7-)V5!dN*&1_Tjmuq)5Be$^+(Ye(GOhBnt5@MogIlzVH{ zXA#SHxTh?NdjO+>_tv}od-!;pc+A704yW^^g zx!~1hk9y({92T}%B33<5&;n?M5SF#kPU!G0OPJSb-5HHqeA^G;;rM^E;fyK<@MhxH zGQc9GI$^3vlu~fA>j%UD0>1SnDEWNEc6r~Af3*qTXAA9QR;YM93ABINf92;a@1{44 z4;ns(8>n)SsA)cPhz?qaPtfWtT|cis=RU~@FL}<87(VH&ba#4I^R9qn67QjRnz%>2 zPPF#L6&2foR*^jTe49cagZD`=b^?^+g%ouQunmwL8_o9{(_|$5coshNXX&YMC?G!} zhat}YyZ(rg+M!`#`oz9-XJANI0hc|y82SaxjJN+ARo_!btA-?d&}{z+n+9hh zxssiId#%QWf6^zOTon#S@I{0sfo>i6Akv-%^Upt^-FuLkY53#Z!$(Rm(yoye4NaL~y7lle)=;=B$vQ)w6&9Kou>fT+sHpcpTa<6)^hXyE2IH7#rCN*|Iy3UUBdLMG)iV z+vjc;N%>?XjToDSQRq%qR+s460XrC7k7@b{A>VQb2Yni&2~(zwq=z^85pO1~XY8o3 zOL~m**=Ct1o12_0e*YY>HM4Ub=eXD}mG-OFNc*Q(DVs)t7;CeOrKO~n(+jfJqEp9? z9VHlkrc#@E<|`2AR@G}pHY#`wG}_MzNtU4j8GJTAdxYNd<;#npQjuC4GhsiGKl~ws zu1r!@EoDRzv^#)a(uT&h5kIc`o0_&CHK99!cwC@IIXUsmGj@rwTgcxj5mePfq=iAu zhwYhwUY4W}pZbq7G#8GfufRH(k#@}!h1k;xTpcJVA3X~6`D6Z-*fW(}OQV-H&{hcj zXBQ1aBrJ9t2+SkJRjBMaSXt^lbobkv8~PGSEvOl8-ga?jP`brk55S8+lg5UES>|Bech{sgNM0uX-f8bUMU6xtOny8r!5;y#cnUb2~jO}?%5O1qvcCd40ig64C5s$Zz#9D zJZtyJZ%6MGHl`JV;D>Vc$nMnC6<i{n z^A`(dPMlFpHL!lLU(d!S3U(dk?Sb=O=bM=cx|PeGl4f`F-c1Xc3;r*X7k|nA((lF! 
zCF{{1G6TN`ULAUCLzsp5D^bmae|C z4u8{BAd~~2e~g)xb7{37AeJivpNPf{w0FCA8>dZw;xGRfJK5K*&)Y9?{dcHMbd|lT z>OO48k8SIdUC*_=|CaHN<0@`OsT#2eOJ5q-`S1zGK9k)+wUp{TS}b;-#{=lx`HWej zz#IXt$LIcLqR8Q|UA%Hd0Z^PC%+A3fgWHN)cT?IyJE_2`=HTF8!F~H+G~-%`-kOuN z7>9t!r6A>0hPRCjX$+JJsD?x|w}jaP2frPq4-{?Ee9x6SIyt!!hd|HnuD+{})A>~` zwacZAm~rV`Ln^Kync6uuelPUXjdvBhjFV3JKl4!PEY)KL3~6>!z%?%uiCa4V)dF0b zlJwi@KxD)}+H)+s@;`M~eXPu@TVuSS$?xX5bZsy`TB7^BG!W7dQ+lM>h(Q~ChwYFH zSH^!J?ts^s$F++XfLVw_g9)g!6EH#S3IV=Qz{G@RmVLKp!_S#|TOWu;=fQ>~n;*qv zrbpdEZUZ9cI#8EA&wQ)O1}>HG5Q07BwtBVGd3f|MlzQIbncmTe@byamU&^}j&MkIN zO1NTZ98)okp6JjLx!y)H6dG-^Cb)&-L+0#{5QaH>LE9vs~0ma^V}9{aGqJf~#a&t%=6=sM7_90a!1)L7o<>5IB9!JL^3>AZVd`r=dUY-*ULIOTAbH24< zmu}LjDT$w()RDVpX1B1@4+UhrKY2754&i!}pyPUy&ml?Wy<{ZzBtqiX|$)kk%1blGNl*4in; zX9TNq7~&}%uwi#zzLW*P=Hx~dn0L5>lZW}8EHBf9qs`R0x?gL(*L7AEn>k}Qa_Wg~ zqbymLrZW-=A2g0Y+G76#7g;PkqDicrxE;Gw`QLWuVPC-4IPK+oEK{QfhDY^9DTW1z zMdcFmXwL;I(f7HU@DlF{7@wJwb8r)xgd!}PE@<Kz!Iw>+LWCe(|9Ua!p^SL z$`0a97_dZV1y}p~tBJYyF#2YQ5S<%uvhwq5#2uR6=-`m!6we##%T`zM)`+Q@A8SWw zpip>SxF7_lcxC5|e$frabh6g(`uX6PMNdcd(m?-aK;xE&=#F=q&Sb4_7+DC56)5X|?`x1Kg+g&rjLbvt7y4zn8x$`sU)G zIf>@A*epK0`i5w5|MjkEADe?bk(h!6??eSHEKqJg{mm46aqfILwYN9@)7i6M$28J1 zXVBhAb?DHp-7-5nUGP5)b3WrPW*H%;#kR{jp6;ZyFazjGTs-v9;?#md_0kd{W%#U5*RdpZ&ZT#RU4onu5}SU4!p_ zeX%CyK$!Uj6BDGr>r7ZI-K#@f2!DV$G6KmCfiHFJ!<%D zRH*?l@eFDOqPY}^I&uJ1faj8K%~9A;>TsXJ1N7A$M?Od6f2`J~iV{M3BV3ZNUR|HP zDqHIA&l&By2Tqxch(~-r3SiufoA`vp=}Wh^jE4@mX6gESW3iME01}y5;gJ*gEof^RyYi(A?T$!-dSbFrk;N?dGEE95GA5}&nQha+0WX?Q_m-8%pnsCxPE^m&gAQCGMa*cMq7 zeG&j5lF`7gDJwMsz7P3KvVK3-)EJ*K*E^dp|NMaM@)g=M7xw8t(OxmCyg5n8{CT07Y!iFXw-Xd?YkF=jh8$2?tN}yOswuKQb9~+{e+4YN;2A1!_|F#bi9}F@O#_Q zCxgoN2VDBIQ3X?$kSd8)uPF8iwVEfBW7fT0$=jEGk3Nijz1Q^AWu40pgXbu`eb(V) zi*fgS+6TgfV4&3_{y8fm`|#)u)zFYbC&TR|+bw2!GnSOvkv~uT!-A4$ZGHJ>SJG$hkv9 zH=62P=#No0X{mQmFR9cjO$d1nPu5`ArjyH+y9fI{1g)!6njV6cw7c@?(YqKr+ALeW z@@~YnS;<}}ckk9$`^W0$7Pqz?=RD4m{Zdg{8X)4-XYFa{{<2an&Q>JZBWdBiob+*V zWH?f64kg~pT&b~%VzV!g+F5tNPv5j=+{ zuq#iiD`Lb%MVuIJJ^a8Uwn*#@4UzBsrc0WU{HXq+lU{`7M_u=@snXtN>g(FEo6e%1 zHLP!Vy6VGnXXi(JZxL6~fA{@p1*f&Ti+mq!-)TAMm{j!jM1P-uJoQ_=>aPo)ytTu` z)0txxCfqbFbvd%>pWh!V^7b-KB@zB*>ABMR!ea_XtZRQHPP|8z8`o1n&fveyEgq_k zdw}6jxRwGVTmq-)+8T8&Gga)x2HAx!N_dGyH+p{a@2u$I_c^$Q=}h9v)FfGYIj9%0V`5ENn_CCvJN@9rQ(Qq!-U7n7d?J;tfNo6 zE~hPyhTmD8X(BA+&@wQVj!81F$N333wykbauBhtIr3J}e;!8x&w z4hn6CKblLDG9Dm71XbKwT(YFh!Kf?pbU`ak`=@KybR0yK7m4e)k0K+C9uvkEu~@V5 z3$KK&8?3Tp)}r5!OneM~t6xrhb7oR!iwC@opjl!W8G;YcWs;#g<7T9Mr&b!uCweTvuy*bZirk*1 zUQdmC>s81oomqRNb^kGn-OjmM&3f18Z(>Gbq!KnW30P^dT$=yT<<7>nhmZz5i5OcW z6zx+9GEB4C@pR=RW+Fi%>1(JJjc9Gx&Fy4?N2&4l-d|t%R|oIU{xH@VbXhRM?N4<( z!tlutVxFTq8##LZW0g2pOfUWJf3pEflTa z{du0(b^HE)-{1Ae=khr}oq4}s<9Hs&V;w_^gY#)XExBmu3B;-o{z~oHXQ#xPY!)Og zji*p1pl%;sG!`!nkuI*o`*BbfpAMQUb6CmoikF#Yz`u7vY|?!CbXMixjsg)y2eJ#K zo}>{y<2)J&v4I!VWdP1ZXyp$~aIj)bL;`HAtaNyC(#{Q1xBN%0@yVIn4YONaS5l`0 z)WmZmm1GAUF^qUWk$}tf6t+u~#`o0NQU3>)o$S(E50_1nx*GWlswwG5r@(`TPnds4> zzHaXHZ4DLIVl5SH6`aKJ;5G`ArbwnjP9ZQ<}6pXXox z&d5p}F#svSvqS;=9h)&z5W$!JwLD!?T7$9 zh0gpCfs=r12-|sQKJ}NLP&m?XDwP9=>0*kewsSU>y+dMh#=+ zIsxTEV|n1vAx)hvy20_!?D^kD3UId9GO=2W^Olb?3D5AU@TMcE0719#NB{JfxwiO- zA9&u~cki4wEaykSYwP?kq{)`=Dpm@x8N{QXc@Y%=;PhnF)}uV1xu6FS#18EG1@<)v zCV}9d{0&Etnp)=Yh>SZ=Sx}WJ>I>0w;_L*;NZr3FS@yX=ka{Gdo7mzO<7o8yKWltUXx(j5YCJfOJcSXHk!l(GCBwkke8KF<-e+1?;*9yqVy9eQhX` zuiczPLY^Xc>lw)ik1FUWaluZJ(>`XjMySj@JPdA#1rIPP5vqeSO@yWs_?qf0HUVK% z5Ev&mp*(b7_rX#cS;iq>`Q(yku^fs`kCz)~$lpGCdGJ?Mh%zA{W-5En>1n7Vb;t!l zihel_H8E+;D74rVU5t(pCfb&E8pq98_Bg;hgd5$_rSNp@u7PY7v7tPtlpuu9GV~yX z`8q}bil9)aq$NdCm9T6wkTI^M?H|gSE8oAMfOIL3_8$vANR+$Xcqudn_$tDI${XwZU(vat?_l!}#E7*mf|| 
zt5C2-q!uSRuc?IZn$8G*9@4)GsRem`iEt16Lf$QMqs$#- zj2Kh#v4OT)COqZuL^3{Hs^qQC7F(I5c?TU%z4vzY)YkrYjRx8sfwX_*ddKsuJPAERF(LS(I_q2N zWX<-BgF1(|en|g+Kc8sV)FSlBWkzaFGN`j~A9DmAJg6npp@U#!$l(|^tTVblp#q_* z3FQQxi3qR+)gi=G{%e3YS}_3ML)23yDi#0c+Q|Y-2UV7Laz0axsoAROqV$Ipq~SVQ zd~`4^5gkhqyPk)~Ql3UJiy=u8SQ?L=mFg{89VBUK?64;8KWX949XmuZ&iWeycqhy^ zcluqKYGoBqe8vW|e15Wg{iEb7s?z>#!6e zqL!n;g2pIGV+dFM6QtLM=`*p5vfIK=M%@&5CHIxh1%GC4E1wl{B!ogP4iI3aS#jpl zTI<=o^pdm-CV82R?vu%Tq!kqn(6;g+ncMz<7g#-Ic0P|3AGu6~>hs=R@qD?W6 z6T2asAr}ROjB=+QE0EZJm6ercueS_Si{Pbr-?Pc^1QUVdVg1~0u>+D9O{{3QQq{hi zTwH0s=8KY4-0EZU2S17J$bI@uFmC-E+~Ms3g_3%M7ny?f6RxVgv38@z^M zC$xSav&$omhSG>m;P@p58%`^FiVm-^VO;WyFL)OR?4zr49>fXYNnI5@7#@XB(S`*CE*jf~>m#~}?9 zNcyRQkO5Ji=rI=a^9TZZBWxv0CD=SpEMbLN1hjIkS*1?N!q>}xk^Iu`u^3ypbWA?K zlD4GgE=xQc;O9091?tHEB2NPCB4~JdT;{>_6TD-l5k7)=PT)_`zycWaT)>YDGaql$ zvt?_6*aI0Ph{u5MkO3ehh(|1WWG4k}IM<%{smNgHm${82X18R71fNj+`K~nDL+8Zf zBD0as3V~%mm+#t|d2skpVA{TYS!{Xb*=>z=QM0ykXc|xhL1d)? zftyV*T+F> z+8WfZ7#QG({#CL15nV-qO$g1m)D7MSal}#;8}_6*b~|3ZtlL?RG&8W?VX23&+Qi7D?g{OrIV=?ZU&X{y-f?2bu{?W8e=m5)+0>i zSUjfOjE1*>fw}kKfp*w&7c*8k+F}FQuC`>Qh{VEIbudkvM<7Za4WbK+BI!fho`uUHyA9dH; z*eiFL`ma}|gPJV%#~j)fmZ3F!^S292ovxle`D>EZ%$au!3%k%LNMIsPIw3_1CU8y8 zZ*CcA{B|XaOqTf43Z66%lA|qRd8QvOO@b?d2zX^c}iaYuSS3l8bAMt&9Q2xiD}JXwoDU=f?lyXJ*AN=b~P zoMnku^^iF6@G8AFb~f~LKj@UXUNOn;@REUjcvDFEGc)Kb(XK&x!`LtNp4#QD(GSj? zaH9j}ptsbG5@Ic?4jZk@*ykmT5YWz3Dh?h!x(6{IWPbY_9KD>4Q@8X!-n7mKfxy@5 zi3)*pc3yGnX}U;FPIxm8qx%riLK}>ciDtRH=Wrk>N4zzDKVVR}3DW*qSDsxl5AC#S z@3UoF!oRHPnPOF9JY?L6aEsLI66(Qo+W~u{a^pzSEUF+Ul?x!2=5gb!%_8BNApQx$ zBNQuJNP|TZ6W2AJn5N%9-dc0^V-h^>dp6TkZ7(2eq*de+`Damx5Ff(a^Wj8v1`=k8 zSC>{;qm4L3dMKvnu$%rxGI?UvArtwpybyzXe~casJ#&$7TlChX^u6nk9ZLAH?$*3f zE6aawpUEAgsEg$x3`F&esSGz{0wo}NZziRa{!ru-h^P$i;+{zfVJ1aHBnUna^xQn* zF`1|=gw)^|r^mA)t+!qCZ{Uu=C;z1(1=u_wyE&I)cpn`ULm=iJaRN@$cv=;&O3EUz zN+TOa>WpVFp=w8-zL7e=INtM^YVbcg7B~MII;PDG1TT&I$79OxT?&%7yJ3VEJ1Ofn zJ86WwB5UF$`qgeJ=7*d2?gdjPL_dlrTY|S@A`HnZ$c8as~h>M23fv@0b16BWT$vGy;4hCgMj*%Eq~Fo1(s~OP7ATuyk;d zyju41>6W}XB0-o#UC4APLbUKfXr_c0PFi8biWO`I?cl=pRo0WJ44XsdoDg_na86pk zWr%phF8r@qT#($HnD!5hHp~+nHw;G^e$C1;^h)-!&p7q>%?Zt?4%40%n-&!A&nwtd zaOk<4Lsf4LXg|Rhn*=UU}@T?m8QtA?sCouRBz8{+~sQ@*r3yFJGX&_MZ+` zuRkwZ6&XQ$I0A}(`|=7%$8^{@R04#|2wx}o%jf7{jrb00=`HMX{5SKz5j5(*?YdMa z>HT0DwQYVckA(1gxiet;(kCD_^g#p(l1SWy2vFlG61hf|Yg9WpehqzNT_f4r__3>4 zx)o*YCHnP?|8f6n5ou)(M|!Ata{U^VWM^7qIo;MfFFWV&=56iv=0@MT_iAL3OI+BR z+F@QjLNxFigLEufl<0In;Kh|&fq}zJ6CS%CbYGOG&@D~ER8HM9<5f3#HBgT}kK8RP zkhFH-B)4lZ65RZ+$~$aUz74A0g@C(9`Vh+HG8V0MWmG_M0xVV zQOK>a-Ddx-9GX?j*Iv!2EqqG!?@22kcIeXQH~<2nqV}&Y@ny)_(k@zAn`>QJo8b=! 
[GIT binary patch data omitted — base85-encoded compressed payload with no recoverable text content]
z>us*aj?CWvXSYef!nB2H(|0a3ZI4ub7Xt#5M8c;95|+!BCsgf_TB7mDA?Qt`StlhLk_Xa+%%vfUCs-j$*;=8~efi-B>yF?x3xo%hKxmi`z~KKj1rhbK+2~&E*R~ zvh2psXW|vHFu>URG~yIg?ZL+G@gHadWwXZwXyBubLXHfzxr& zA}5w)=it7N0@m`%;F&^MfBjh#7cez>Ae=07h(>a*ffcHK^-b=oux;h#my(l<<<^|3 zMgoz|TTT<)R#b3mhsZL9!as6pqD>Ea%13!mMEDG`ZZa}J0tNVuQ-=-hu{XQs(h|jL z{SD6&kG6()m^CX$ZE&wCY=lC|AobyEh+cSfr2kC2p+eimk=l$fEB}uTt>92XW<(3{ zY657WSfecQW3|b;btwzvy` z-Y1NnJ14Kt`4zfv3zF24soT9+(;;dmdLNTu+pH}M$Mn=_sQxqjimR5)NSkXC(dud# z4;MIHo~tv(ulfENx8q;GL-ik)g zp0%-bd(P{HQTl;rGbRS9?!?zGoN2xXye__4_b}WC)uFre%w!l%!-EnVJ!PFd>qF zlylm)QV{8odji-dAcL8C9t<*7M^nC`g|?w?5IWJFJ|fm|db=*2J4bN)Z~l0;7)4nf zcUS0$FrVd6r+te?Yib5k5G+1kV+2vw)jdgvmAviGW?}K>nE!gSp&JU4L3C4brmJ%z z$U0-=@arYuI(XyVC)f2KzecPmL*-|kyKtZBdJcECiab>cICwBIrNB%!C^U-mWv5R2 z+pznLvXOPaXgaGltb(x?=g{1_a~XVa_GVj8$)*b*K5^o4Hef9MGw=)s>LalHhrSPy zJ?IlC5pCKz*(vN;uQ2&FE#vL4Ul4NWhK$xfiMk`=Os=hY*G9-Zo{UvSKVTca5)^SB z=vC})83Nq@4+xZ-h+|GH4dcg&0g%?o;g0cT|8uHdp@vbjql!KLeAyQrMs_zA2+6Zt zZ0msgT4DPs-smU>O#9P{PBlY>V)DNGpduylwuVKM#S@)Dj5uzeU00+vRm}UVh|X-% z|C&-kkJ6MbuG~VxQf3BLpBZ8?3UPHJ73R|QT0kBGF=3$KAw*rU+=0WI@+-Oid@fMksBzn}+hH(4&IyS0WP>`5yG9B8P*fa|Uf&UghqmA{GpP zgvgv%cIS>A(}RD`lZ)|2v@WtV4a<9U_I+4lHZ$`2VV3Q2)i~T`voB)Yq~*>3^IB+N z*jFLeEEG=qdI=*o?z9K|91&8>ljyKlTLziF@bw=XyHZK9+ncBc_YZR~sBfGfalw6> zV#{B6kjhWz>^ETx$!|zjJ1GL5t)?m{?9kcm>nq}}h+S9HN>Sz=dB@ipnUV-x&f6(( z*nFmOT72Hy=Q?{~SUi!^zDuPiefCojE}XUon-b#U*Z|U*LG>JU6)5!~icM^cJ6W}j zvWhi94mjsfSU0gP!gnj-!uHFaf4}hAm6zDA%fBA{k%)Z48p=ef?C%HF2vj3hWt2bf z5RqrmGIVU5d;a-%MoD9mre$0l-P3d8L!zZQr-=Yp`)N5~>b3&t;W;$;RCrtyf}X`r zY_`KNd8GuCAIr zrv>O37^7A009D5!z_BIimO(g*C9pGW@s3NjI6kNmPx8De{r82)oQ4l|r!x<<0Ca!_ zJGEKo_3K`+A`vvw{2fn36`CO%Glc#UzGu&&;8NU$gocF&hgm*jmPr5m%N;iHF46Sk z*Os@#9UxeDR9TJ?(bPpE=7s~aKh}bTD^vU>^=K^ zZSl#=uhcG1n5&QYHcoF|F4Xda^BFx_7Mw#=$*uBO$1)U807g}{e+sm&u=T z{lPvWlANRUUTuGIS<*c8>a^>olMnU3tMaFG`r95Zi(Fdr&I;witE=5nAAbM7uXQi3 zM`gU#@we~KjFms0d<|iWkTi;ANeINTG&?}iAosXc@Y_gzA6P8~M0x&gyS1D}wGYF0 zg-B2aMiLsq)Bo({m9rO0^2KT&T1Kz0w|=6;>Px%8-P^_e7pqliRq@rIS@qGlJjh)hA2G)uM1u+b#<46=IgyOR@)GJk z`?J-ScU&YhFYKDZ(ZUB0hU)6tY??Y^#QeBs2M#W_Ejo}a&yx+;E|y2YzzPu#KTV_o zz|2$LGq;JisD9~Fsl>C54#Eam7CJ&%xqj)txhSv_{uu{Gdz{81ws76*E>cprVbaBR zUs>55t5!ZN8gX;G%Z4HD+wF|NG3x5-WU^V3@DE12@cOru4&Fl#s{WuBVao^8ltjwM z)6EX1IY$%)rnJ`;@k{a+n)&T@O-%V;Rb$i>!Bc^f5wh>JifucsP|CAgV zDgX9(NU*ZW2H}cy$ZRL!TyX3THwPq30tk@p(z}JI5yMB-Es~J5y$B(sYvjkS1@ukG z9wpL?ApN<*7E6yRw|_L|59>QrLMuWIBE=RUxHior4J`li`2Kz&F--5NTUhiEMQCsT z>&?Kl54j5_Y*3Go>y_jUG;?*L?*C*o_+4->@0)|mSSK4e+pqeO)c1iHy+WZu(0JK zg{MarLP%bfPC3FZgdq~0u@5j2Hq&(p^;AOdk|9t{6U5g_-1w@Q}JoeR|9S`2hG|sy}&!OVMExPOY1z`uXM_#yjZ^nX+AtYnm zth>IX18to0+0zm?{X05R`#5K^sONRxTIju9QGEq_Uyj;A8tb~yOPa-n^+oM?nI(0Q=;&=dJ(~O*7UHeJR;ZvvP z(E#{*Jf()a-`WnhE}R-zX-s;*+pFtq7$x$f#qfmU*`w_C4y~zd#j2#0w9b$R;FHkn zl(dI?a5T(E$}K6UsL0r*@dxUqhqtzD6w-L^bW`K2)r8Tk4m-W5B7Xzu;oD6Wfi){^ zZ*f*E=PVKFc0LS~?t4c*1**PBUP^50Q-2H%GzWv}_2E8nU-}@-;o(L5xlQtkRY?2E z5^-`MIRhCyv9!Ol(p6WNMj(wMR)nnsPt2s1dAsFnrxxzaK^#C8kj~&Iw3Q-Grg5@R zCL!_N?dR9P2No$dFn)Vb&GV?C#V2{>0FVSPih@3DpVq%MCvFgH0{<-v}x1+ z)ct;w*k|pBw|*~9=FD|#>A81e?TUk~N2VSa5XofVn>B{WuUoga`esc)l^8>k}xAO`k3o?OrMFDkOmw;o5xD#6#`(DzZyGaY35fbfNNKF^<|; zm7t}_Hs9^)K4l5#?25%#15)CCl>7=EFziQ{TF&E;H}-GyEm+vhn8Ks8d$fRG#bnC> zw|3pSnQFo4Xy+OjWE+KMsVQ{EBVUr{IUKlc5Ku9UkcgGX&(eF+hWLTmQ`uqi{?g(Q zLg#K|ck*vs0H$bFF)W7o^kW5+%>H^FpMZ1zPn?k98~95BS{~> z>ursu|M=~v&senXo2iU6D)vQ}ta`m8w~ihx`9rN4JWeZ81T8{~pHFhr=X2sS-< z@SwTIKhEsxio)98L;JSQwY?HQz0`8DoiFgVdT0kl{zWC4pj{^`No)2LB0S+UI~G>aq^BYJfdcqAeW~YzRvK#O_$h)tQNCjV~4aoQaN^6Rm|jVmra~buD1x4PLKC@ z!Prcnotm9~`bVc%Q^RID8)^;6C<%JIs>1cnjZ3S)1VnILbJ?&ypF_JXnkIsAkFq9M 
zm{O@Ft|iGsIVGX+kABz^hnIZA2H7dYj0Hm_zD>j)H*sIzKWd96g}MEFaIjS{SItq7 z63#E^iQM*IP0ZePg=smspJ9w8fQ=v+K?4N2hjkvf@$BD8;*>$D-Q&v7pFg*+nsG8V z8;qgX#C0k@bz8(J*KvUNZ9XR87WNB8JQ?tsX=vD~iWrxyEpLz0(TPH13V}A=ec_wu z&n@!hWuJ8YP2|Xbg1P24;!u{mn%IBL@u%*VC3E z-xaTj6QGY+bB_u{YuSAnX7gRR8vr+~d#{D$ARoY__fF3Ha@tzqaD6x<8f9EbN!q7& zMuA1)Uk%rnB!?wf>&EN-^^2MwzCc1qKdAEJH3e%IVo=eC5Nra>fade0u9uvTkX_Sq zUM9STY=S}#F{$p|Yu;=ujk3?!hCGU~Ti?`F0it8*yAXnBCcLY=nq)o=E~r?eRe*@TQT-qWQ3;6h#d zbq&^sMbS2?1Hs{AU--C5@u_%~E$_c>d(FF93w6l-J!Vlt9F%Y%Q=(pXXz1jx87V%| zgfBWd$@W~+^UUX|%B>dh1#{lt#$4)}5bT7c~PDi!x6NYe%3?DvR z>f8}7z~FJ9gGxH zpgf=t+*1c;t<2H=r+DAz`t1Y0L6rkhahQ|d16Q_4HA795S2IISRhItyX5&lun_E>) zWxDHBs+oDq_bvj=3|6qs^|I@9`F?|-dbwel${`sU+rw8>a&0m4+YD>7)I2ucv|{88 zpPc^xNz;zVmL=7swJFEtL$n2XgP$k?H*=JSdKC%9|5a%bWvK+ zcOo+%NkF@=IT53@wbe~Dp1j^78ruKSmO&_?Q0a6L1OJO8M&5l*w?oWyy(A3bw>;3!K_dn))+I@BZnfYcA zU|>p@IK+OwW}kUD;R^+!lKhEAZ}*WSi<0C|!^SXWewhs-tR66j~Q8)*=jk~ui z9of7`f1fX{mi-6oeahNH(@ZOru$p0BjpWPfdeyh17u4J#_WFsf7w!Wd+)AJ~$Qr&$ zu77&FC=<B^wzZ$o1XnTNvfp!TOO*f zcY4KwQzo)51Ex@)ehmzoozl(}E(^NW#3f_rn^k9=LCe@K*$!9mhEFfPWhR+fd0V)> ziuU=H`Zdqc_O-^vSM?B#_e9tiYmHHyiWz``wsYA2IdkUVVM@*0;L1_wEauhCn}z7X z{G!bqfJO0o@yYdw-jqXo&NTs;?xChb;02rN9QcBy6tQVXWMZ;ldmK*O+qeO#CD`Nw zP!};dT5oqhyUl?6bMfIv2*}+34cf$fARQeNe~H}epMsx;0$D@2H)gcl2M@q5gOGY4 z>aEH%2%Ox1&Dz@QD1k1 z%!s@9?)lO|KR!3BJ9!SAMZB%Yczwi1C~<5usxqPqM$gAOGSJ`uEc~BSPfz`|;|HZK z=;W4nVn#1FQ@`^Hn#l^O#Uq0axmMu}k%Wv+`SpIV5bRKNywK}0+zXMWb;&-FgNv0# z?DOL_XZ~yk#DuUXT@^`WHa;ckXl(-m7wQ@sDpH@@sb4Lc5xMR~uMhHzcU!tRTI%$< zpny?u@6dvgK$~Lqq{vIy%~sZRwI*U|_D!#bCeC$%jf#axLe;;$wPnEBD@B5!Z zJ(6c`BJPqWV&#^Ts?|zHnibi#Shl zM%>)~cTj%I$PQ6ME+@>Gr8FkF8aY*_+eSuy4yzg<3dKfSaCeq*iChe@%yV&ZA|9FN z#JlcJME08B30W<8b18F`*e3wpBp3!BbX10f z{`G!&b@f4ec z`X3E}A+~Na#wTf}AHNFjWtJPm^T(kt!h}SqIPJIKS^y4(Zz=6$5yzR>)a0vo2g`vl zY@M~U6w(|)LeWHKmi}t}Q`&m#=@1VbOkMe$D)fDBTnDz?SZ4XW+_jEDAanAdDR++CZ~1sAue|orU8Nh< z4JnNq|ECr95a@xOKmE5r*a-8!sI!d|Tvu0@)z(#%8F8B#*m}AuX(*dK#f@|5*~c3e z^7qpn0$alRR81VDK3hLHf-ipX-TDtQTk92mc}zGKw#%ll&$mu5mM>dVR{D8Qm)@E5 zLSi8~&*pI0CrYYt$MC1p*ffbR{T(opP+HzWzsz@bqvy8d zg~CHu3GOV6b5uUJm6vIj>l+$GX14&O__Orc^8Ndd*yM39I~xOro=8q_{91YtyyVxf zU%-BH^xjx>%(F4O-{lXm$V95OXN=7lczUBU#S$wE>DIeDADnlS8%7cn;XD*^W_S_J zgax@ud7!hC(<6}|y5VykyoK~Ftc$|xn9{Q|pX5x~Rtq`&Hfldhhc_iIG^F9vT6B1H zYP5aChysSEoig<}mI-})7V!o`qZa`I@a*MGO)p-$N$VQyc{bbWVCR0%@7LY)ZKj#a z>M^M2Sgq}8$wkTaPLJ2Uf2y(j`i&dXLkkwH8|!_?a9_DfqTENdy$zu+QE2|k{_w4H zrPrGf>cDK!I&6RfP(|(%VG`2>9wCA=sFKXCvORhTSkU*6n_g)3g^Zl`t@BS`8F47D zzbr?3nXfk6JZ2jD(d^%p;bV^Qc>VaXJUd4Pee*27H|IB3lHkdr&Ib@JtVr9UGd*sI zhPqich1$pCRHWH5RqOISfnB=Z2nC0Ou*TdqKlCiq2AK1|j)U5A;E>&7&3<{IxRf zHhuh4y;0suX*Pdf%T-6Dx0;4~H9)cIF8g^7Lb7(JpT6-?iv0bwI?Y+0Z zb*ukObp(mrIdUa#fz%HXZ?9in6fF;#DHfBIjs|Z=KKr$ z{01{G-QtqBY^Yo(&`tF*F-*w>=UKRU9uKYZ8X8Sj1vPYxk#-eh5FjeOqLrg>F3XF4sntew-ia$*tz z?-ODoqc>?k^@Rxk`N*m7GA4C3HOxE`9N+KIZnOK=JzK`H5|+ixn+G1rZ8tjnyj+hS zQNcT2z)j(m=II-Gbfu+&Qqr_Vc`nPMM>u)+mYrVV*_3R1>`b*+`ii2D4;L$_1;;Mv zbeV4h>m}l12F=)0_UY4pPD5H*feyTI44$=jeQ@^LxoKnOs?NDuby$#999hy%a&~DR z`qwOc+n*eI+p)WIr@Cw56SUvhxGr%r`?O8RxBk)yq)w{+Fz%UXu%`_2H~e7}=f6?a zV2i%ORYuH-&&0*EW=4*o-Jf~ibklJFWKQV%f^6HK7HbbjyIo0**<>e%*b^Pf0@iAh%eGGpO%u8 zbmz$v1WI{bfUMtVT5My4TNj)|c=+i;l-6D}^JSe%uB==vU<0}(Ciy^_ zP*XL{%5xpuWRv1AA23*-;)VEGCGT!%!jev&({D)9R|^Rd1FHd}IUX)BJfHi3a=LSj3njWR~va z&pN!=iA8GDPmOZedB~yp&BmZ;H~Ih50+g}5kQnnYe_9pDZI^SWDJb!CzO$-MtkuW)ub z(rSKiTz+!%Ks=PD&4ya@C!gJrvuC8@ruM+Y>i!EMdd|~q^w_YBixjO`EX>u#=%Pyoz4YZ2fQX za_&L<2=D0;HYeJ3w_I7$tTeib zRw(=OSS+#Z*z{Ffe%tZR>vlXSDDXb-=jN3q_Ax-8?oQ%v;l$8P_>@YkAQl57UHh3? 
z^MSx`*u%C{`{_lj%w()ZrcfUmdR36GyNhD-bnMehk3k#}g+eOaNGGD2^St zNX#i}81z*f%PY7_!F|9p2M5iDs5l23z?TTg3r$qMC8aLS4Aa@zn10vz zRGR0UO*Q>8PwP4?TdK^1YyAqTRzkrZMxEmf`Y7!-aW+XH-RsNzD8|If>S~(PT}%6n zda58gDp1^%^u++@3(j>)vzgDqjF5#1ZWp1xMBNvNeF_uTtKm=+6C9c$c7Bg zDUJ3l>OenOJn>?-v{B_#5!Fus0*PZj)kiW7+W^c~Jb4uR1q=Q#Zw1$9}PZrj<}?)thpul);mfU$*)DuP4iRual- z#Zp}^-7>PFz0nR*EJl&E4!qo0T$j8E5RBl!V{ZMt28NK(F^N%eF7sx(sAQb(UUqlGgHa*5 z<2O(8AGZFp{pQ!NB1Rc1-i#5cGc`3iysUK9U0La|>ErgI9_?hODZIT;H_@B5O$S&+ ze=48BKVXxAo>jv{qp;KV1{o}MMtBs{qil|mQ3Vc5w1CY~2N#*9N(JN#MY0eo{n^@; zT@RcfbU^n2FjE`f5U>CwCoJA%p@-u`rN9Fg6e|`oRmu$*V9X9Mw8!`E-^Y#mRoVWy zg((4ij49!`udFLT;-BSao+Q$NGF`!x80xlF5_@__#T#Q^mwJYMzfg23!ObtqeWgZx zN~kfz#Tn3%5AF8+1ytI()kH5wEd2smo{2gNGIOx1>RzrDv~Yn8zxA@+aTd*auf0L$j-o=6Y)lGf31q40+@_<(tdcGr! z%gEw_&lHH_@EIM&N<(PjUVumRvlKLLuIN^|n__2hdy^co^GOKqd2;KBR1;hVGF8FB zz*%4p`11Ca5ilhB@GNE)@*+8^_nO2Mcz!OwikYq<(qMK;@2|klA|Y0sLp~xW1@f6H zKb=G0&PsapNIF&`F!p`7#9Aq|T_RM0glX5QV4fWNBF=#BWVp8W<=L0ri~>lJe^*g4 z*y$zjq9UIGopQ=`O_-)5$B!HG=r=!19pJiUTgBsU38O|As<{`(c@5Tb(|K51{QLJ> zAwRNuFtG9sIx&H~W0`X<&)REp8}C>!2)BqDqNp%~b_3ucYG~GwM^dYYPJR&JD!X$* z7ePwHm8>>2UTLZKXg_yAyt{PV@ID*u1*u*GcL2 z%Np{!mA_%l9pWC6TS(7FY${8J8V_|0yIbgw@;CO&fU8R0v42NbE@@g) zQc@z6R^W&jBaviIKyC;MM-b2n)Esl7`n6lB6u9U1B6ObGI-&FeTa+>F;WcMv|j=u4oaAA8*=t}vwHk6cRGIWKI ziQimBE8q-{B#lFL(nt8*pQg|*{HTdqut@aHF+lQYxSR%2 zq;rWZqf>>cv?NPe80j>2lax3-knuoBjN)#LS@pUXuL@_9(10FVf8jTpH3IKgoR$Z* zgEYAtmsT{45Xx>(gCT^yT`i2KKR+=hc04yLpm3yKP>jStWu2Y+HHwn{1DE#x0j4Ze zQ+Q(Z*?(eibEQ2H)x57)8IwLLl_x9X}U#O$)eWvHSG^OI*z~Y)o)sQo7s%v6P@NJPez-f zE^l}4k=U~X5IhGJYvhSEu4cRL(hKg+xApuz?&*hzbMM^h&~B8Lme{}t;f$q|U}Gi; zA*i89h%+6sbOb+`O*pLyv6EG$isU6{PM0wpN{4{*Lgc-hFSov*H{aWikUJUxCq_V+ zY{QzuG`;Xm=PpLBKEkb*B%jQOq;(1o6Smky=(?D4E$k~3N3aV*1UnN?b}A~D8=1DP zR>q@oSfHAD$M}$)-Afe0I`gZZl37|VARuA`5oPj&jr5s}XAok6fEvIvrhOl4ENryz zTG2B2r<%icbamQnmjZBY;tjYeW(} zav8&%Hi{j7K>I~4gNG-7TsCS>t=sqgdFQl}stdkNUAy^0cR{+tZb9xd3J4iB#7E|v zH@10)w%bzaW_@;#%m~+Ix!2e=JfFV$*&HNxG`z9T~34Hup?~WZtR;MMCJH6 zCQzi|gMyYpArv38PB>-twWh|8gcB}m;q%vqgMIY|wO_(&-lz_)pt6Evr)fxE`}t<( z=B`pNQ=k9&^GEFaRb@M0pRp^>A}(USmLYqJ=7sYlk}>Dr_{Gyzbd`Nnr1WBjgyxG~ zhD@{}+ax~|dLj%m)Qv+S#V=pH7=`(#_8RvuXoqFHM*VE<3Ogy+5Aj+z!fOqkUrS4F za^K~T+<_u7tMl#Zd$_XJZrBjAZQBup9%F}%of&CRIex^^e!N5HKy-^k`vw{usYoq+ z`?i16$P>ycD*kkI|wMjMK2v0cjRPB(o0J{Y~J? z&?xZdq*(JD92{)=(GYzX8bhA696Av~OeYHBGc&V1uqc$|Bw~F_Xkq|iI+hh@d~;Fi z73A?}WWlSg(|#uf9^3f)nElPIiiYGtH+AD2P&GsCD)HA>S#toyT;R#j%|+8D;Df> zyx}29`1nXj&;ULO$2eK2cKGTW?y{f{Mdktz8z&w6bA_pZEZW06{df_%dHB%|*W`?d zf9`YWN7(U|c!FVGQ0I0O^D8h2MA|Z*D;{FS3XS&Rb$cZu21-R0^IQWeIn-C?79S&< zgNty1Q3e^15@X=03N?$18d@>|x0C&3P3lH1K5l)NU#-@c_|_c1V+BF8;nW;!$t+~3 zl}bLjfJQ8*7rZUckL2thRdyE^`ff4Wn4sh);n8-&7tMPklbmt`B>M##obyazH}YBL zJLZHnbg<|}CsRmpu7%9sV8!O313#9Pj(yk3HBrazA$4*bt~s7evHFccMT8bJ@e3Ic zP0>=OF1_&E=tY?RCYIJ{>*zS4VMmE?tYl;!t&%wVPKvAX(MPRI7ZmPazOi-BuYMjj z9mqzX5oH)$?>!%n0x`sGrx8}lbBYrq6l`6Re7HihYw%)-r#EWkmtvwX_O*OiCCkL(v&9Y|In80Z~VYMfEVPv~wfG3h<&S zEgH|CPT0BN<-c24$onMv>AQDlEMH8%uizme7aolEQBerIw64lMFmxHB3F>Fip+f@$ zL3G5@xreDAx9j-QNv{vtjjTHi6q=7Ih)&LWY>S5(0LDCTxV7rszE2ZfBcw0a>84tr za_k`1Jx@T)e#X1L6SS6UgKhC=jeOaV8J8XwCffv^%ouWWyVyx%Y%G%g1)76EOsET3 z_bg;rJi`L!VF>K`VglBlcr(SKEE&qBgXNuW_m1>Ay81~!ucsY`{7ySeF~r(3i!ev> zGLQwALqsu!6N6geer5~l(!c~pcV^tr%hLQT+3-neWBWHdp3^ZH8yEF$dxdA^Ny$%r zAvI<+5jj1G`mW6#v3Q%^hQ^;{cc5wO2GVq%ODn`Dz^V>pjb=v;?)*__;IggfffIN? 
zRqWGs#jT|w6q_D6A%z^^=g-g&e$tt*4BN@HpRQsiyj<`+khpp4&+u5t$;ce4xKeYY z4Qt?qB@9_=w3*3m&MUd&GcH|5 zFmA)qQ>Sdhi;@m*9UwbOA?5prrvajYn`v(!VR2EfhwOAh++TaWv%c`ig*}8$SET>U zef~=uxl3yrgt>-ysD?L8UhCy_4Z#a7oR_m&yJZs z{k!LYn)5cdJj1GcA6#G+B^T1mXW?b%EDG%c_y8QBa`{SKdG0Y!q0@#8a82&NS@)&C(B5l0$N=v$D&*KaD5KSVANvFu{&?$lR^fXIjBU zPfHuyV*w}T4iozU*`7J?qt2yPw_eXL&8ix6GS>XO_ZL&&H(3LbscWctPV7u-L&TK5?f_3r(nY3BXzzNYO>_sBg9Rm7wwm_;O^+kVv!7b*=( zzz`j`9q}tf=p<`+QhrZ;WHOiLoq1dsHc*dQf`Z{JjzD!JCKxU#TjM6<6Iwk+sVh{> zp#vy&mS8bK&Sh)ebtw`8?xhH&q^ZhAzy18S<0^vJ^GRUy_~VU$`vOBv)8DU37AE|K zZCE&#P_sC}T8Fm|@?beH3BR)g0wOi@c(FpBlPzzN_M~tZ7}gHcV#E4_%fP|IKp}9l# zAE{f+xidq;*ZM*2F_*u6b@nY{Vdu-#0O;Q}gtYPXFPLTEh&jwfWm9&*YoDHG`EzfN z8jcCjdg}6x=gptiwpq6R{NL(LQu%LJo49x~@X80O&2XQ4F0ZU$zHw~>?RtqQeu2iP zHxgjWf6ZY0TzAEmrXMArFvD_h1o_SYE!^#FEaw6O#q3OJW&J?p^%SQPe1wcxhmUW* z+4AKEoxE7m1y4VVmPMHKK=H&z3h}8CilT?z%`?J)j@;1jVCbo?pL@yPvEI@VpM3fT ziAVH55PFr@J68c1{w4)*_3xARvuGwMt0;nMV%$CGbYg%64276{;+ zNesD*cc4-!wLCIRND-HgDN4{!EQH01D|YWfx_tO_;^fKwcv|SptgR-O zoSMpV)$^$t5Wsv|&C6Cxd{s%a!U%7(dI~Oz6I_*y4P{ryEVy+jDXQb|prbSGJ*CyU z^i|Ou#Qd32sMt(R3pWi(KOUv`x5xVBZnRFvorgItJ-YtSW0x_@YAz1Xxo6l?^5TV9 z^(x#g2!hdQ5VR`;1STd2bSOfB!6ua~u7ZdhTls3NFvXx+CxQU8=#aQetYeJ|CRs`o^<# zXJ==qt3@@V>x*aVt5^!>-mNVb7s6*>L7`88$7%PgpNi)?y9cQ0+Wac9v_JF1EN;-{ z%Yz4Ymt2%;ZF}soykT_a`?;qMAKnS5OpgtOXQ-1k|FrHLg7bd`KbRJ_e#Xc}=cXq- zlj+;fuPAlW??+M6z^0;45&mWu7yr4#W+a$D2L8ubH3t$NWcNJh2(OiBZP+eMO%VG= z_^k%#SaH^Y3_!8if_f-2UN2p~>d~;DANwQb5&qV5OZeu~5Z&WWMe^cDLg~wAM*V{y6{EXEozf z5y3y&o*f>#$m@f`i`0+7IRO$&OH6y6iYcX|XVDS5Yifpv$g_?K%RN}?wWM)O8#9B6 z=d%rtDfL;HalGGf#gW10?f7mqsA5Cbj`)%FNF~qG+YnW5>}DASs!rbt8zt}!!h^+L zwhmo--DY&7BM|%5slv03R%AeIsg4gmc1)#>MEg(8E8d%?9uFF1JgWHd-LhQyyc!SB z;frA-Kz?rZkenH4Ahs955DcUa5_KySA~ACoH_BCWM#v%nWoi3#mOun-d70VTOG(La zXg5JN68QBbe8PkYy}_F3V_is8A6;Y$${{td^Uej%>y;*+Ja^)`tEOW}@7^GkbxThM z*J+{4e(};-XK8I|##E7t49;s3{w>owz00MoEgOV?g#a%Go4agb8aM9h)pF^1bAApB zyFe;~(C`C`k{onCct?K%SX1Hmxh= z<>XvKJ{X^j@6s%6Cdrf)gdF7&&zU{4bAKuqGm_M?1bL%PV<>PlJKW+U-Ik(jQg@u8m$Hc4L zvO99*h!+49{}dm7#r}10h%6d?sczj2Muc+I-z9@QjF>C{Ciqyfy2mMD(lf`(tWgo?wQNooKsy3@5Ig&N1#O! z7V5s3!XtKZ@J!o~s}H;_toX`aBlI6|?-GXYBr&P%m1J9i&N9(O(1vghEgs&)6gieI zIAHU2uVn1@7{f`f7eZ^Gv1#qPH>j|$r>-U>b%y!0TMVQ02oS@2Q|&ryR?M%vX0SR_ zzW7ptS*Y5^4WrK9YvJ%Z(|t;l1m4XjZtG9CzCBZlx%nUv-8S}|@k8}VF%bK;2{i^6 zY;yQBHYVJ7W~F4Cwlxvv7ak?tP__7d=Gz04Yu^tNJQpZp^?4|XZl3wko6a5mQ-cU! 
zN#D9fC}KdMEXh~FcyoczhN?qs;FYwJY+$B1g4fKqbC3E43ysRMsFH_HbDf>dw>|<$ z`M6?H+*9R-8w$CfwUZ;aZRo5wT}D8?c`18NUEk;9pgI;;u&PTFN1tA&_xJAI6U}{$ z0g9oj$ZXY;5*aGF*RQClg9ldxw@6)oJEUREBzadxLh9*p?PkB%8r_$^gGJTfjTbTx%RR z^A|IF+ zMAh?DH9Q$r*8J%iV7nlEM5zPT5Q{$O((xmafntD5HFEQw?z=+InP0%Jms@&a(LEITyE<>0nA6{L7bti zv~)nn({|T+bLT=X3J7PR;D1g!A3z8oa@jSlQ$#L^m{~=ajPR!XwDsPK zFJIU%f5UvnA)bb{98gE8imS3~uj9Y0B@KJ{^_ z9XirW1gz9QETk&h)U?XZ2=pS{_3yQcig z>YX)cz@XtzG>$rt*>bLCSml>{E8dS2$+gso$Ub@_H~;#s3S#`=`Gn4ySPs`~Sy62Q zNYVlVElM(Pc%wbM+5MIAv00Oy8D^L2&s7!1@q{Ad`>{bPAC^k5)`l@}7yd*bP~>WK zKW7kFToTf+3*&=9`axO4gJRzYf!>Hz7&Zi8+XlNgqhi^c3a7vC|p+UNKKU=MdIFw z!||yWPNz|=LF-0djDvKhUq+;x#skfXO5eoJz67(Dcn!6cIw$c|?!d$%DkhbMHo-98 zw*5*FCtf&VrgGJo)b<;7LPEr$Ar26W%v{g#R0?wlr`TRhCE%G<%_DeDka5s8bL@MN zqM%z;)f~^V*4S|KxROf_ZTT5`oK2`s0X)(8igmxm=^l=Ym6cBqJTm>wb*rzNpD5qn5a!0ZV?rEK&* zfzn>=;_TaJ%0Kp6avFdfU_9bqgU*@!9DD>8$e*=ZW@#EzCWd-TGT|LZ{n!6UB86TY6vwM2~rtE{~8 z43RAGrg=X{|39kUJfO$5-TO{N%1}wBOd)9$849T+$yky~Bn^^fQOOuWks+i)p_HOj z5<*FlQi>=tlca$Vg(UQRPS*Xr@AJprd#$ybe%Eyl$MKyGlUpaP5=77hm#wG*N)@7s z4MmbL8N;DF&HBz_^*d+YejT4?5JcUzdx+myk`SO8MNSy+vxD$r>}}~gcBMr4^zEDt z`_n=(mE9=)#ye-i<$05?{Y+ktoK@yAiYy?({JwePLW~Yb%us#(aPo{wIWwLDe76kI!oy!hwrt`y#k*U{pce%0rmUZi88AQs zRybkdyI-4C^qc1oIK3z7#|7TS(VtrT{U0l$PW#I*)!xp#nOmI}d1Uq~%FEEowJ|9x zvg$OHF4naD&MJIxdaf+Bp~QhoudLv(XO{;^n|R9qp_9i4ncWZ?7rb)&!{jj)#HHN@ zU23KeZ}8bOyMc1!Gh>Q{0mDJN!?_Xr1w+lkMH~_bSS0Rs{ncYNJDyv%zRD+acwaPe zoCD&*fLkS<`oX*D5g(92fWn<)3b)Ar;J7DGp;Sg4ve(@4xolCLTO`PB6a_(ByF1qS zR(cYzBhsIE1K<@E_4SVN-r8@)(+3qifvAX_M5%kpTfQ^<8no$9-Dl1R#9NbwwgrgW zoib|Q`vIE)BE@Db{>H`;weYdeu#@XENA|#g>2mIU#8Hj7?bc6v1@50~a-E4|j1Z9u zpMEo-?R99NBkGf8__~b1WY~1@qozT&^yKQz$F@|p_j*1*Bm1Fuw_Wqdf3w-KWbWFr z@{K(t6z9uX0vcjgp&S`12#9GvZf7Dn6;hR!(xjMCw^DTt_Bq?6{&VQDT)XHuf1iCO z_y26x3>!~XYsIeJyF)HQDJ(+oP}Z?w1_x6XgdCc80&m^*dZU&Tq%z0Tjhm_(%W|=L z!DFSO%32(`y!g${i)y7eyp?qeE%mVNgsIOJ-0f98Xg>#r=;>)oZlp{A5J-#2yo^w5Lfna{^n5?1%wBQ3*dgxvY^ z9r{`)_U_x4UtaF0@%X?UuxhH#s}Ix9#pRx=8T<12^3gZC>G{Xa%R7Cuec{mes=wYk zK0f^T)yvWa4<>55LIpqrKGBnr0(yn>lHR_o_yN zWY^mx200sa?z`|+WA}E(1zrE#^QLpu!^E`+yR?U6)4YaEGibP0?BKbQ+*CT-wVCWE>XJQ$??uhr|n|m>&9PRkH_6!d&^R)j(bW#!$9eM z8Ba$ZlDMP6aYYL(()hqlMj~{eZQR3^3!>ykLoL*!0KozR_t7o&V6 z@y(+nE|G#T*LA*WK25jlgz>qzE{Ehg1Vz7#KOO%r@%zW>HKAiP9IM;5kJ*0znFeEF zXeb-*j46L}c>?4E$WitBOtTZWblZOR=^dY0h|D-HB0%cagF@S(fosoim*0HJ$0$59 z%(Ks-VU^)|`I;-n9YJRx*7Ed+j&@^S zo;&|0szw0~ABp4TqXtT}+CJ zq=un*LrV|0Z1hgG7ui~%v!YqzEUktL4C_5C^Omm3TY{qywjcdcZ`1o-zpB`=SBCfJ zLE$#Ufu`cx7A|8C=0riKO7t7Qmaek{O|+Q8fn9{+LQ{}YELTxybLtp9Y|$F$-UFK_ zw;IO4bz6${4GsHo6)=gwrur?T#$t7xVR*`seXR#Ttsq$Nzm{8Bpi; zwdno(_zgKJJ>A-I1*viiAQDyrFlc?(pH)-cK0$WzTuZlcP>Pc=h(ysdk(DP7iffoH`-oLRKw|h0%$Um1K`&wK#C< z7gG?wgCjC$AqT8WFn|E^XE!wco175<)@Ri@=cn5HWoxglwj0w!TUq*G#_*w4a^u@c zPn%bbw89rqsAPTLHnq2^tRo{XJ=*BRKovs(XzOJAxxI-Pe?_3D!u3h$r$+rP`hNh= zyFZARM{^Xge$VU?-PgQ0QCjfyNA}kDuv#Nw6$BjS3}OSl{RvgriIkMCF0S2I*FEg~ zSFnZerNqNfQCV4RYP97yZr$2RZNgHIJChD8f>B<~ zo<wp?Q}G7lgIK2{+LX_qf>zwDTGiPRR^4fkp=(*{3Bjs@ z!F*hG8N zf}%OW-*S+Y??4YNraC}^MDrr;ksipXvDPZ<>ULwl#ieI`#vvRGpp;RX00QTE{eE6h z)$piGzv{vjx@z|?Tu<@$FcxDg(djR$bZQQW9N9k z_!h%eWxFujplna)l0k{zDjI&2$;NKkMS%~>eY>erI_{83^ee=jsFW<195FjllD>4P zG>VeIPC`T}g>Ki(tUU%#7j`a+p=rlf51oG4kBEPYH*uN^Phq!iA!7{8)F#NB{uqpe z5?4xJ09u_FI_M@DaJ>`aLJT^EMF-2j2CPkqjrjL5LMLMJ#u^y>9y4^p1imB}o~!oh z+)F1GuRg2A!9e5S5r<1w==_O{GDZM~C2I{DghIHUh4u;bP) zHjg77TNE9RTH+k@tot0b=!HtR)n9Ao(y)M>F|>D_;PQhbub2&M%zfe^wft1jI#=Nw#k^iTF~%9bDIAujbQ~y%X_*F z;fbv@m9Uiaw~A&*5;GM=1Po$iW5-ii066X(Zh!+zG+6Tt-TE zJGxL&T44;L7P5=&JOPP?n1~^?%*tbjKkBl9w#~p9=CF!_*M&cd`Hlla!rnV-P^zD= 
z{i&iD-YTW}=1=9`qcX|=k(K%Nq%lrzZf1~u0C=}`7kzqoHvU`op1aC78ynB2JKT8o zGV2bCbusi&2sE~JSR%E3{ss5o%I}F6y9dDD^k~)(I50WETFl`Ak63jvZeftBQ|t2W z^D`7GJUee@BcV9}`Hgy6pw z?lHselYgBrjUT{qY5m|Q0S)3}E*rM>BT0{dNBeKNh_Q@7ry}r2q(sp$pfIN_@MAAp zmr>Wmk+Z|~vkYIBwucFZxD zQF~{wbwYbRUEiqDFFJU(MwusXml`tT+X%N~P1*6AZPzPW4X>M_<#2gFMu$sRBL8iY zvMb6ZnN3gg(5T2+`U`G6I})BsT$q?)=G%NO^aV&?i*mkWTC$qJf3RvR-b#ni5Cgw~ zmS+YZomJ3>BR2V@%M+^kie+C~=s|dI z%^67pyUqNqBt84}w!qg{7w+o5nCEh}=lJPgzG~_Ab&@sxR{BZB!+|@h40~&t#tA{$W7S{OHI*W5@PH@F4+M#41KuZKd+IV33~0ho?Jw$3?^r+V^*} zP0_QIZx=n?uJ2j{NuIxrgOJ{QeuJI=cn&P2QgYbuoY(wO-Y9CMi zw5PAai^BlgNP@<^D~yJp?({)tGKisO#FeDOZfc<&XLOj9J!+9<-+M(rHfN6-a#Gei zCPjya__$2h4nghzHplyG>*hb7QxtV_o6|cj+tB@ATTOnpjMmk4BCS!vKHNV&DBEMc z(>Mm_Np48)5AMvR83qsTAMTJz)o?B7luC1 zNZzHLv_hLd5m65itV+<3p+o0T=O6o+gDO}AM-^0UVrD|(ml|{);Bynrd-a!S{n^-+ z`>hJ3RMag(hDQz_d^$9Z$Qklu`8#gnXD&n&BwU z0{8>)pH8v7u6INmBmF^hY<}s^c#5NtD(ys>xtW}d_5JHZR2O}ZsSQiK5V2LlT~UT^ zR{e!t;1GwsaZ`5ql9z@l3ve0#~9o{~W^JS&e zX1T$#28^iRO6tu`xRPR#ZO!}{y-naz+76fffK~w3zPx`D!v8mtebs=!38Dzeel)0$ z@GCL1fuc6H@jK}KNh+qv&7m^6@LUpt7@K(4V5WV{N0at*G@RwfEADy#9R|49mQ{Gb zqHEHED@xptpbM1_O>Hi-Izh~;gy(#Jdc4Z2iH^M5H;$eMRYCg12=A1apGVo*+PeQr zDsjg-Gk_Wc99tw9QN*qVNfz0M+%;;S^rfY4JWo^VXlQI-bogthuexiJ=lX^yv@>2i z>F+V3l5w_r2KV|C^4Tct?Pr(~8Ip@Uv*{J=dv4+-^Om)~VB|v0vA$y%oH5_9y1n6yXRGEH5WUB|0gF9rmIN8|L@@wc!)q z#x)MF&kYTQsnMl%2cA52({e0!a!^y&(U@U%xDCNErX-rkCbj$|eF%xeamWdRdleAM z5Sd1KN69EK@Bc(M(0rl;Ky%qmQPFxpO%ItVwCx->1=G#*ua(-aa#Df^`EOORr0QAI zdrYE@v=6oYM8nDWq|@ETPsh1I6~AoytXVpb^0fkW-UcvOOIY4;jPAljfF-mod z#JW0U#lrVP!UrK?y;+o8|F!_#n2aS5$b^deiM}Or{JO1xB^)ZTCr^4Dt0Ndk97rM$ z4E3r~v-h2Sz`D9A@_WrfjBnrB5+6C&)W)#O6{S=0kIroHoHa|$`P})q9i4hf8bPj# zG~m>W7mIzO{=sy3l7?7n^@7vmePlS3+_jt^h7=`?_p#VAKI?emWs49J8MYxi`V`ml ze_@XFnN#;21?LE+E%Bx|6?~~(nn!n}ItRXSqQ-q*6J1g$Z}Kw^QK6Zjh`&L;Q>=uF5q+Y**BfcDJrp^cP$r71l8q-uchG+b z9y+gwBe~uzrBzq^b$@GDq^sqzFej!uuIA#Keq)vVb3}?7cio9m-}p*+4GBi~N`82J zWI@fRv;VKIzjFTz#% z{KNJvm7zeA`lC&i^FPGSRFq&9Re=)c-n=;iQj2Pb@mVoK7xTDwlVuD0AB9FqZ`sbB zUGbw$9*%1P{dwrH3_Fu2eJ50T>GNBckiIM-l4{{X+wcb_jH{+Ro&h8BB>+2*K=Zs9 z1|ft;oKJ!~MNUlw+hw8k3s5wi zoLh5YfbEyMp_1FziD+PyQz+b<*B^bYHp2@t?CtsEX9e#I)_9x*A3%ri;EZCB0mU0- zL0h{vx$oyn<_Z05D+50go?ltBkM@v#kpuE$j;j{8ln8f7o_YB{M{)ILhRnQO*OA9I zBt$5321utD7M(K`1oa^|&?t@k-ho4AAd=!Hf5mhB+p*y2V10;ToLsa#+IR1PaPk4k z?9#o4a5OBTL?|5%fQb_*F)iC7?lqzmiYY2wH8}0-<5NvsAdjwHOOG#GT6#EZlm7d@ zZioxC98&ByQ%|JJG-ZR^+0oOj{m{d*O9?;oTY z-DMGU5uKBR_D~=*z5`$kV++ECHDm8(fqXOUnVC=4qeM)&!yw_yT<%9kIyJ zqSg~YHWL&-;p^m->yFDoDC=!L$sGq%e*ZkZ8v;!}-9D7|5h4H2N&&A~;rq{0yPTiC zo<9yTGvw2>OQ+~xt`C(Cvg+)&G(*0lq1_8zZ}rIEuYZSke5>;!J2o}#t&ZgOR>2N1 zX9Wj;=uIJap{7{aoFTt!9>q5n9ny-kY-@sF>`}+=Q<#%Mus$T63KuucEVbozp&-GJ ziUlgnBkka5o{)Wzn{`En>1{`e*{=_6ihA;Pm{iyE2_s7N&)+|PO)`+AyS!)5j=(T3 zeUW>FNfRU z|J^|OLGc))nBCj~uJPdBGIu>ulPp`dOmML%v}IjfD-S(_R%`L%risyD%koo=7BJU!K*A>Qa4Vnb0RqMG;&AvGYei>lwLQ$s5% z?0lH((HVe27Tr!o8{I}LTOuf0C}4x+l<7h z$4M?MZ*=S;#!>{r*QPsuVX6FKfRz|G79JjM^DB>yZ~%iisIRc^k*b6qQcTlg%#zjz zQk6vt@b~Q2mWd+8V#4-X)dgOYr%b6?KS{sEd#0TKIb(+efIxqTs-bODlWt8n*LmO> z?ec?^GBJsWzT*bbWmJq=zke+2qC8fBE4WRkBv>Cn!$C`e*hlN*utObp^y^nRXGSLO zpAjfbXkB(Y9c&em45SYbSK0%`7N1D?wkT(5a&XF-3OJu*OEe3_Hc_t=-Cn@m>aCeD z+bTh2)i~Ace=-N_3OicTzn>l^#rQ-Y8wv8pR)f&6*?xlIBviO3>80S26xTnDXII zuSs%9KWE+8z@dNi$=j)34w`P=lT&*d9JL6OxwvK8Lgqxs6Ws?p!!kHU|3xbg4m6X; z=$EAZ?1^jNzmsiIr!D4#QByOL+4CSqIMz70ny%he)*)+Nfr4%{p4=bp0Xurdbo&GLG>+S zJ!uQtyJ$J~9=^!WTCeZDge~94BV`j6eo)YiiV0g(Tt3=QOFTKyFEZM&`b?UP%jRg8 zok2lWx5HbP)Zec3C~TehJ88p;K3eLgouAywx@{}-q*tBo`H=@#baGBK?p^fQZO;NX z%_lcUd~;oVZ;Xpf`ZeCgH4Z+-&6$Sv zO)Wl=oVP?Zi4Z!}F)uneyRuPauPYGhaQ@= 
z*52Me`pWj*fq}j~4Lm$=8O2JA@x(N2Hji(RrM4~rBP!|qn0_51pUs&whoI$6b)E}- z`iy?-GGB9#$;(4cSBrJ3HZ&_+CYCXNK(1T2ZMdzG1TeIXXN-{{mb=!^tdqLkxxrwT z-In=wmxHGmj4v0>hRBvW>pWZ`It(=OE-UN70gT=jfc&>Y>6%;XavzK7qLk?3OX^kIxPy_zQtD*-*C% z3r(VDi{-+mrEbMf8^Ip)q~>J^ZxBusH1_s)uL8b-+z!iF-eXQ~bB1)FyqI>S;;1YF z^GWn_cCH+AvVRAnDB($|_5ihFX|B`1t(*7x?AR-vv%3!IeQ;gMvi0S^ChzK^-@XG3 zWDPKKPPvF9wIdE4e7pO%xoV061VtW|)AQYuyOfg?k*v6h!Eb6;zf z6rb8uqSV#6vH5|Hl=gtE%*d1zC$`xqS7T!kEi<8sxM(7fQHs&!^TU~x-b4#2625n&{6Q+DytV3(CP{rWmD zV@-Wz&A5E1z?EKJi)lO2x;NgRHBpQ(2DS$#SG%{Y+pQo#IX+DlmS({evM<<)&7cke z21oIGF9!48y0oXe(nuO2F=6-d`6XTP@rnPQbJZexHVKhByLPCkIl|h> zN=ko<+|b70C~N|hXn)_VG3lLmzYaI|6hj>NVJgAq4SO^R<)#0Te&*%j7pA@7$DFQT z&!57dz|irHU)^A zfts4r6}!p?B3f+g2hYP11JYal{{H#;H#tcG37un2t6nU;hE=H&wHq~`FkzxlT7r!? z$v$ltWsW!Vdhq@guz6{nH`g*+o5! za$rz@ie5?w5gACMEyCaO^E><%CUDojhAk5DSM<}|>fIn7hxSa)#CN@snEo5jp3PU3 zo2Uz`f{y$X{xcB4j71MZ-#8+hj@4;N0zu~wYL2CLsoU!zJ0Xmw2W zhzKmG+45)5M4)dp!*xo@v71UA|GrW@n#x5q=mMU~L z9@L&;FC#o(n|eGM`!PM)X!r9v-{!X(7uK;guAIBCnpWCK39}#}`L# zg^b#64m0(@!l`)b7A0aov;MalcTHC8yWNLY(Wi^pC&jtMXZnHCqWQJk~Sv& z3@9uI&WKcetyuX@`Yx`nmJ|kETh4gDQ&!q4+sLO+GdvO=uDj@(1Y3cx6Z0aGe2Ab_ z)OiEnpR!h$_5H^yT^WZLT~P~W8)oF7WCN6|9TYV421RB#ft3$OzDH4v2Km6@!=m)& zM6ZC$6Jh|UIk|h%P6AO;4Pjo%T_I+S?L6Ki?VrY}%2D2e2L$fAU3SJQ!9VaE4^aZN zaw{Aw*N^~LMacZ~?9KflB>-948}TX}HG208oaW+bs#y~L#I~;JuwAO+6$sy z3y2S}WjaoznSK2m`Z8PXN;dupXwG;P{}`J&3SLc(K{Bl&v?wUK~wLmqGEd-0-DZWYg4R4){7{LSDG=Z9-E2lmaNztb&M9M!lH!N6AH zTp>6eJemSssUY3-QpH`;Wc!0EFr}l7UdT=^4EA0^_cTQ!BB*JIfw6GvoOW&O`Y&pX zVH7Fzd{jq1r zbnGenr!4sXlcSk~cJN)rbpa;9OTN@`R1%sXK(xn?-92`oL$eB8e7mr4F`J*8(5UX^ z<*_O@l;q~~66}=aHDmnw93D+IC5Vk##hM)^)h$QBc2^WCV4Ly;NKl(dbg`9)bP zvd{*Z?RfCHU@a_`9A5TLQgdU`l%NP|v7?a3X))e=3qUGL9&vl1x z4D#m{NGSoz!H6X!_H)|sID{Muc9Pa~V3>tScMfo7pv-;Nyx@Atk0LPBA!F4}^d=}M zdZC#!holqH=QR2PRRAF4ckXb|KE(WYde0z6ez4i1K*J zJBhlMlD*8r#dauWCYoHlUi9_ChYugbAX1JT!DQy0aZ3yg3>+Qfr$&X{5MYI~mHu5! zCxYswwvYiS1<;mqSDwKJE&}Y&4V@l*so}J~dGX+ivegqNT$S8+P+jle58bDawYLeq zcW!Y;^+-hz`}c>gf$oZjtUIRFHzsWB-Kgsuwso*exXWg@eOsjV^}YNykGTUXa@(BK z)NC$ZYOB3%_fJ7iQ<3dOIE1EX3G!?S?p>!POAcuSeLX^Wt-xRvWdxbQ{Aq}j?(K4! z3KK>{JDWHJX4?pTGLtUv8JGc4q5HY75F^ZElvKkkW7;tYio%bfUGhw)_Oc+I6$jC^9 z($~wEc(San>K=H6R)RxtY1-9qE9&D~R3wI3`f5CryxT|eq69b4+M_#c-IE=CPwd=1 z2nzg=Ee z0A+g3sBU=eGpbr7PjYDtTackA!Dn+t3=`#g`X9@|avd_8FsFg~ZL=I6)k*)FbD zFK_!dxT%_KxY;`H$;q1&Z?F#f=5Mn7kWazMTvWL&3t6ITJ{}p=(_*j!BznL%(txgq zJ!e4K=HJa7AOf8>*xUC8d#gsBWuHF|11QmVIAZwBFwM6beUDE)ac5wDRS&%qPx9|z z+wG>k*~I`xHi_oK%%1m5D{3Bi6dA`^LCJqAex%dTCG*BcPq!*BkqXnv$A`+``6=bM zJ!Z{3J;MFB5`To$pzi!)rOR4*15#yrNIp`S|50~&-M7kbJO8nmKR#~#91U)6PTWW8 z*U!=$h>HRgLBtgxnb3OZs%mPQwaRdY6oXmG)K>|hVMMYVM^SpMwR8rOq+z6tG5^fu z$D>oE)5}sh!R~H{3V1k+g(=3gLek-K=|h6F!!Lf*U4}NdDIoZq5-MuBcu5)>yD8un zxw=lK*YlTbp|Lm#pq?{}-b7|ytdx0SWz(`A&9%?3DCCD98<=VSAka=nVWj@~k^URz zy_Q+$Qa{VX^nF6%ekJ9P?ZUM`x?4j2F6`|n5AKpbR4>Z zaQ)edzc|Z9gcDdL*LQidFTD~Kg>;mdq@|?66))lyL`vxTmscGUJL}0R>+Duj>Az*o zWu0%SBbF=;N;z9{D7x+A+9F%~N{!UtP6MI3fPs#4JD3di;%)QIxtdo|VORBf^77b} zyE(SM?+#W7sZ3ux_I=vM_eq-i=e^|bd{un^?DF25U+d(1%=`H|t&?T|J%JZ%H+SJ`9 zdKb?Q9`*al{H6^(qCd|F9X3bx(Ee}HzF*7cv6&$z{-p41Y&!OEHO@aSMN@olED)U_E_b=yD8Ot?DlR=m(~f-SXePSVZ+aQE+48ez@3*noW0mkg;WUNcO|Ds)pY~ZUs3eR~G9TBq5Gq_sbMigs zP! z^HcE$x~=#AG^^=d^zT>kOW8v?lRDku1A_{Q zvE;Dhtd05ZvR%3;c|5Gzj}EB5$FnA5avE38xHEM_Sy@>eZa8xe*=O9c{_AZrGBQFn zP9+2>Vk!bF5Ctnctl_+i^Z&E}92)LBcZz9FEYKx@LQ-K{#|?gO)tQ-Aw^?TVicrqGP^d$=f+D7U$^xb57? 
zyW>~L5#eZV9yt4%`oxX9)Q8;p+Ndb&cj2Zl-*W37or+*DC zyMHR~{@JbuBxrRJgGK1qXjwhNHeDw4pJh2@?AXKbB=-x=`h7Od3@S5CQZiWapwILx3nyF5}qPlbPKQiqFErS>oa;T$==4P>^RJ# zSzjMh0#lW*{Q7Qg@{brw+)DmlN+B6PHX{a{X3NMccQ)8m+%A^;IORq3;&5h()D9@S zAGhvq=H#YBCrr9cm|Xw5nFF`MTi98A~K~ef#$9H*u4uX^Zd02X&6|-JExtN?rps3uo*X{U*GxH`!6> z7lc3(@w)x?$<1%)SlE27Km5oat=Nw?mU$|Ivh_2(D z(~b5Ojlddh;*l}YxHxfjq<`DD>xZMg2c1Mfi#JLu`bLmE=YRhdB-U6yqR51J@=By)m$+fVh}1>sI509HIbSl`UAJvp&YKoi+7{w#2Xg;z7=!qg8O zIa)tWC0MvH$%vfF4@8AwtR6sfKXrML9UbC47LX?iYE4BF-lV*>&QCxgR!mPz9cI~>)tMV)qQ&Y{0!cIV+9bR3( z@bL|GVRa>-Y;lhNqbnOykP~9W+^E=i>~1ek3#ed_Tam#e`eKMO>1Vo7n|afdaxN4xu+qmLT+=U8-NUPXJ*Sp=yGZ zBu_D#fJwwWPcJmD?e+nLd>i0iDb6yZk+%#`w?RWfE?FGXzJox>>uZQboQb-;d+9pI znQc2`@}rWbiEQgfW$Xoeo$SmPiwG)qfYj$1$u=OzKT6KjPr?vb4G{g$i;OT~RYAv% zRfS0>LRv+{w=Rik0G-BMVkjpUp}1MG>XwKFqEpU)`EsE;D~-^TfC^}WujGSaMFICiOpmT=xd|-J_p8c_QFq z8gyacnLd5bLGy3SoMT%y-E=<6oNfvumIsQT8-1FN>3geQym*m(TrqKpUIdIV@A~yN zj*jJBGsN&UfO&Jg4g{c#H87Cuo&WXdms;TE+38-cD1^EiJ$d@|$=a`@J#}?!-@ZFr zkuz#~k*%fn+va8e|M&hzc5<9m$He>eLseWhA`J3J!|fJydTb&16ltHd&rCWAy6+M-29w6J0x zDo6JUMMwd+tGyE#e;GeF@!+TCviRx2t#JpMK0G^wLRCchAvw|=F``tV&hboYSd<|I z+_6&{HZrfBy1d)dDe{bM^JLjT{kxZdndIf<23!yWXy7@>#B&Thp|MZ^KA=`a?K7>? z=8SQhBlhb6PUz%i6p13bC(EQKfQ7ZPV*WXud2US6pOCjbRSfH8jgyT~%hGV69RTfO?~+Ru_OG$8)MJqvNlE@Lkb zN_xJ{O;XqtWum-GkMw}SLO%mZQH7pZs1f|=k?#r0a_+p6r%w&;t{RhIz3bPkk7FIW z4mB9rXW7PHBQ*}4xI1pTQKP~X$A5PR1;qhd6Mj+pOE&U-_TCY$g<|e4g*&4P!UvqK zIsA-ehb|I0RZ)4V&t!*5e8L>-@lq13Det&KMB%o^oO}Ibx;MOPBI#`O)-nC1ZXctf|?F6A+<#xov4B@YNy!d+`9kIw=we_Lc&5UQF)C z)^G=se+hjQ6ZW{0vc9dQ4I`I=CNMOmPM=zFl*fYssFvtqsnB+i!3^ldU@Ok`)nY^t z1}S(M3@e6?{L!YnwEu_wl``Oga`rA^=`zbZ0Z!Q`z9Af67+^%5hoWj=L^7os2_MsK z?dGY6iNlh!;EmKaWw6Qj9utowMdGGgd7%{xYmkIdEF;5WQWi!m3+8c)E?&j_xggd0$v_pO!4m z{brLVcT78A6rxnkmZwe}GS+*y`|QLry!)>ikV7vfRK~nBq*wg_XqotFL1qY4^diox z@tc1i1z`33^}|Py!pt`zK1n!;5Fnk$c`TCWkQuVNHI>ZNRaEM?UU1M?)YV;gadU#; zwq?4j-(!^Xrj`$5xF+-HQuuKF$omxPb@oy8Y_%Q89VE zg8NX=WcPX&>Khn{%Yx7u>dS(8zY1YHKYgme@$BR6UCAbAP_;jctO0^djGGnVklcWm zNiEL$Aujj@yUad@@DG6nLr-zar|(+RKX6*?&hoy6$I^|vWsg%1)OZ}4$4;$M@FQjY z22BrAwFe&5Y~@nM4D=K;c~F%ZBL?Mq8#-i&6vAF4#3J5?I+=n`g5Kg5S8~JLk;8`` z+dv)#1?a-ewPJ>+2r#Bi@}qznLD)V@SKcFte%rHXg#pt<>Qrr6VhOk1zyRaK{D@l)_yASCcQ`hBf>R0 zGSmf~K8}o)tRtGY=?PYG{CMnuHR> zHoGu~BmGb^Edh5Qj$-3_^ZkbpmAo5&E?{F$h<8XI!Qs(n5sig^K)})&7cmlw%m|S6 zHhMRQ;w2GGGs}$UqXz6PW<$(v``MNLViKMF@q{a5SH3cW9~1L0<9P(Z2^+p^n=Wz7 zp4XDdhA=IPeoBlA!1NC8a*xS_cl~4%oES(dBI2Jt>w~#P1bbN}!~&>`)O>UgBK;8% zlH)naCW@rhbL>;`MfS-j`66K+Vf*mJWTR*8U8}9qpbG|cgA*Q&VQ-o}P<=7Rf^fJ+ zJbcleKt_g;dcoyjSnHtg(`9lGx6b$wmB+2#L&+ix2?))(CWa0gG|BezJK{CVH??Td zr*SZt^z$sCvJ-7HdnN}RiUiCW=m+%Z(FWB2zoN=(;2~Q~;VgbcwN4W{4G!+axZ1dLdze%~sVP zpNQzJ$Zq|m!;^s4Y7=3eC3k)KN2AD};pz+qHeI0L{x@qrtvvPc{$+C@h z{WfIxLC~#`cVi4dwSPG>te%hrIPYY*yF%40B|+~*xCr$Mv(yG~7;qU%ZxniZ{u+gy z?b02(7!fnTpi^NaFzGKJyl)0nKY)X%>OqbOaO0dl&S`0KRS!!;<=-0u)| zpVm~zX1H&6s<`MgDeM8^EQcTv`X9g?;r;>u8%sif+|m?l^~R*l8Zim?Tc8{W+8QJ1 zhPkH|p+-cMn20)@Ln%*-^PQ9mW|#KyNb9|1NsWVSK+1-Xr1#9N^ENI&LLB#HE@#%O zS+Wfz)nvdhK_N4v4E{a(LeY*dfP=#_{*#(aS%ktG!eaiiyzk8B2-TRkmqWO^?y!E0 zCChf=!{OxxJIr=p)d7Lvis~5E2dx!8xg*OaLaYw_A*UxV2L!_*DpP(ZHPHF?*CW%m zTRAJrA5q%9fBG0loZK5Nf`wQ=MUU2kVNmOv3^s$B6q+}2akE;4Ks%>(Gj$Rx zHjX8Nm`KKQ6-W}!UW;kNpPyf{Z$ONsNYLkPLAXrf`hjkQz>9{2z+-EA?2)+Pc|L|2 zZJjqk4jr|z*6@oy(`BT_=JZiFWf&W8%*#72|68aoKtQNrE5(@&p2y|Ak^oueL<+1A z$>$qH7}hBZkAc;Sf{w2igF(1^B*y!G-_K&u+U*Q_M9e@4 z@xN>ZAwl4`oS+>B^*ercM~j^Z!9d$6aOoRd{^LoKStY4tm8efm=G1yb$X$5^X_hTpp4 zu-%A2Npb9u3@Re38OT7jHF14^z~Pvn|HNAD@Sx`4DZKDhpzCnhtdUp(w?q>{-3(P! 
zzBFtshdNPNpjpF5kJgTlmQ^&p@c78uQN2I;6mQVky+hx8u!7c>f6Bhj-MaShHmiBJ zK3H8!*l*M3UgotZcJ@^%QykblNISxF$ODvr3Oq!O$`SvWH}C!av98k*jy%pagp!pE z0H@%;%Xx#I3GlUJH-Dq8WtuIx`LcS;B^2-q1jBZO;r(M z)R5qL`phJugcTOUdY@xb#xvi%dmU)IyhruEo+a-MOPNH zx%GF-sI2eG1>}0*r_HHR-ZC7`B>guc4({K6}>i ztA(VWMuyOKh)5o;qs&M3p7bsOQgM8okb?)mJ>5teeRKWh)@j_>4P^o&tZisE{6$S; zYADZtqfuwnDRZDJI7mpRVHx)YlSRrMO{pUsDr}q7zt}~Zl{NX}w-D@4MPniJ!C;s7 z1zQLEmuZJf2KIpxd?R#PdLeWlH;O9ZV~@Z#yV-JTKsr&NGspH`*G7CkP5~=_$)Fw zwi^uEsiv%N`;7Z*GGaYm z*$D+y|WIH_OVp^H`xW$NGLBj<-di67-PZ zc;0>MC$={4vzhSfk!Z<48O)%r0xq`o+^M-YaLAWI+OJ(*U4tnr2w%a{WdBvkJ5ry) zsUVk5&dwo!>xQoXNs^7|lmbxaph%-3*uQ_jguBhwuV20h5ST+9lF%~7v?C__NoHo- zha3AsbqLfW_XcWo>BRh~t+V8{Y-ZbromCmFb?JOcw8^-wHIw%Ar(FQ<@&BYhVuS+5 zZJyQ^iX0vS&wAiHjq&4;k|i&6q%8LRn?ge**VjGN;+w=y!2K;s6Z$@R^4<{Lh`}$U zOjA>U1^J;xMGRK~?!l>so(^%iCa+wrLl>LyE(FHBMX|o zZuSttj|AnyYykCJiq!we@wYfB{5JxO3o|xVSXdrMiU@(F^Z+}g zp4cTZC!Ia~ z!2N(ttfHUkkm#AEt)_P2(^Y$>z5#XXz|;_C*^8(*n#;tkA#NF6c1$BN7G)|SBfw?ATg17 z4jXZ9P5nXZOyB|!~F(t8hvFCB#Cuh~tvb^{7x9lJ>gVN8< z~iowa+#Ad{9y8U&yKm#~BF@Wb{!wmmY92 zQ`W5#ehyA+0-ZvWufm-|q?QuYE|d-+y5K-LOLlPfQ`k)GBY%hI2t{(%qfDI!Pe{se z)VyZJgDpfs+=&@$g3+X0Xxx+4>TqjX!F(7=nxGBK`-8usvlj9s-T-An$D1DD_)sK4>JJ&=b-P}5RK#HInL7g&J#)wWD^wj_7x5FICn=d$fuRBwbQSOqmC{dA zYsh=|qt&ho_loG&iyb#c7;11K@PGiTx>G?=50ko3HZ&{d_0(e7nj)Q;a=T4a%)HtU ziC-8^b`8*!Myn)3ih;-*NxmAG?fM>-`2kaR{5gu?8Chk?MG8SIkz?J>vRm%2w? zaX_Oi5n?h(S!f#&JHB)6>Q$=-PnfWvXybQUN$wBpm5;PO ziZOA1)!`i?+}F`=^Bt2|&|)Stq#$yyHGB88IOqen(=v-eI2`ojn{KU}2TZKTDIg>< zpc|AO2kKr7=d=WY^u6$Tqlb59ld)#-KozU;@=|}4IKE7U4xl!}<}V_jY1{9iR-_Ig zSHofc%@P080*IeTOD!z+$T*YBnr4i8?}c}u`-{%y+s=etsowN<)z8Z{Ytt_-T9|1+ z=<~+C8(O|CuV`eciSTI%4T;`&7%q~k#eK9?W{!T@m9k4KbvPng&Yh)Y`{pEAuUPf%w#K`UJf`^gqH4t+!pCqlmx~lZ|V!# zHr(+8Ic_)|YMvrr7eOZI2Q)vWDz>+Xvl`x?9oP@|1YRv9?Iv6&6#7Mt8=9@m@nI0# zL-~J#yds1vybC6RRQ-7Nf~$b?3kMU*auH1=Mj>*ln*7+n_j8~QQd6rvq4jNJC+|!E z2#Nu<%zdc+j=%~Yr{!S?5yy56gkSxOC3E9#RVJCnj&1nXD@nk4=8?deBZ1UJm4GBi zoS=eG)hn6EZ$hMCAmB>a*W@h6*SViuQOc^t`b-2>d(_l8LIgCklA=7^1B$tx#`2%p z-MM*r&^%LKeeM1{yY=Xx9Z|B3{)u1dGwL;9Kd1xU_cp+Cge>4Q`@vtK6n2C4Bds8n zPHFyUxt9+6W|<1*0_V!m*ksT`PfB+O%VTg~D^~f}+ox-I&%Yh#u+yvd_@s1&s>9C~ zpBbC3VZ>io01!$h|Js(~sDa#NX2>!)I_5;1NYRlD{b$;&M~4@`Ko=I$b8mP=#NvPc zxh*IJQ2LS*%eqtiG5FjZdp@e+BySIL##0B0Qa61Ze)#YwwlVnRO>i~Yb)o&c0_yYk zS*j`4UuY#My{z$o=D*Fw~H)q3BA##A!l?bxC4Rz_QWqNg5)Z+)wOFj%Z zSRf}q?A?F9x#qz`cg>ETeHLSxlD0!28wBJVU%Wn*26}7bmEKI3O_s7h5x?7{! 
zM@?_^Xt#UEjnR=F1bTk+lv%v8HwQZ9_StShS0AR*q{f4b1Kd7k080B4Iy9kxN zc#zx%>6?CKoV}BuzXOCz9MKfJGXHc+X*gM*0UF=4ckis#8$5+9vbL_S5xp5sQVF#M z29nP##<)LG53)S5Jov4YWOtc0$NP7LwQ z!nJJmn0W}rT$0%DnAP_4=g%)Zs8vLR&-LE<$&-lMYg^Y3{W;q^lRiqk zV_`i8)e&yAAr_&FkXmq;2a~VNNtSlb<7L|2GY78ZD)fRCl>4s+7U^GhpqN5;M;Rej z0E>`85W_BOK1!Z1l<90WGnyNsh}SzmUF10=yGwusKtt~R`y*$y(H&~fSw@PJ8NY4f z?^m+nsL&YzfeJBw(S-wZ26bab=|NL=%E3ER?SFOpGRm?Y7#4)hpZrfn#lecr{SAG` zqF-pyG&N1_SAM0@W5Kzj>yIwoGHa?~&+P{Dy<2sQ9}I0?)KdccrE%=1z_n)&Jw80j z#pPy9f%XjZ+A*OQ|2dtu7Icy3p_&Dbl2i`Z%{bFy>eLGU9WinAU2q9z!knPw=h4Gg z-DO|Fy{!GP@eA9E3x2??N#eUi?iKKU&xxCUPPzIUZKjyThHM;YtH;L{!%HWg_>K-e zHgd1!%cOQ!^RF~U_3P6A>+=?nH z2P;&yuGnmBUZ5#aX!lj~f~Qa%t`#x7@z@6#izN3{bdC(Ke3->|iCz@ezk;c#oH0mQ z1StLG`TVr9jKpRLwoQfH=Obs-QQx@^5t^gsW|ky!diB+Nb5`K`y^kW;sT8kJK!&-5o!!*IL-U>Ie%u_=dBK87>hnB} zlWpjNEvUnh3r`>=(-a*b-2-M-WIO_Fqi5ZG;Fg8)z=xGBD7l+rLV3J;&z>!;V?OG*Q}MEGolw>y=b2^9W?+#>8+_#K-jVB4BW+$ye{eAWe9*+3dIQJYiTTiRZuOsqtv zJ9gYSP>`FHYG+eZMO+-rooI5nTmU5VL8quGMqskOt~e{%#v6OIm@AL^TZAo8%Q{?M zxdmc{R$PpXA^Ag6?ZECrl$C-R0GvNWc%3Y07hk|-{t8-2usQ4z%wKmf8o-Um(F4_& zol3QHj&z$aWU9_oJ+p4|!&Lt^R_2Ca`Z4sl#Dg8+#0lX{XHY~gWe7=G0tKPFIt*?k zq!BQ^Z=cR>TpSv5@jwsSy#kkWi?zPJ`J~+?Kp!7)DV#1IK)-;1fO{WlovhVe>1Uez z<%=JdmR2^xxEb8kbLGmlOLV;eA_$T&FYPvxUAocCZ;un3LNUOV4GR-OE(EW>JpRAo2 zdMmuXOuJ!eptPff+xD#!e_t3lz-Z>ZM{62+!BrEUoL7_duKTw8w*5~d`_-@>)#;gwu@-8uC*#%pkq;g zKL)vw5UD^7{#=-^GgCyqEcvX{@cdwK_!_M%uZ_UA1=mj1O*!{wSLOtiP@tjgTY>0- zz5;Lbm!Q~gXx7~8}>a995iaik6j}*mR!CO95LT|b{k7$mlGp&DvQhe{~YTn zHBZH4LdxbZhD}F{ytP-7fWZj1wsrxxdT82S%r5`yb z`A90|)!{Qz9`$h@U}9t3BGcV4C+$N&2|Pi9r=$uFIp_SUzBv4eFzmo~5=IoNHUGl- z+olt;#+*1is%WHbJpbEyS@dxi7qdZkOKOc&cRaf^vU$@TS9P0J6~e^!@%fYZXz8Ah zszZvZL`V!>7txdAr3oz(?}uP6i)lwzyv&?RE(d?@frA5)7)Nm}ReAK|RUYV3JPyzj zkXSTvV>Ze*D25$g<9(neXrfSipe%o~tb?358tHWp9^9;x`kcfsKtR#sK3w*z&*39a zLV|mR)vUGjVVa_0>rZd~r?KW=VXG9$4QvytSWv?5$7YIQ^1Tpx)0_yPk`KTzWlAOt zmK_Q_PG5qW|37@acU;f=|NkG7WQ9a#Ldb~9D4|d&qmqy$M1+iz$}D?iCy_!WgeWUg zN=PDwlZ-N3Rw7C3cfC93{k{DD`2K!>oX_XH&v6>B=ksxY-0!z_Kkep%o(+6}IR{N^KrDl%C%Sropw+zW!FGr0qhZl`VU4$m2_y79zF*yFx6dhd)-!n*fa(l3SI=W|B`O&LF(^0 zJV;<=7iTp5=QSP@V^H_(1nP>mNlTM0uRDx78RC@k;@oy?7DohaYrpWS&Nnxky7}{d zrG9K~K1W*{4_FVjszgfSqyDv$MefbOyEyvO{!Mnp2l4}%#UyNT3tmF9vLgOA@Jz#I z%^JkSG?=NTh#W-|_`-W&%ZryTg#(*p70g?!vt*&EbH}9Q(kszdZLc94vW>F)5|>7Frng ztXU4f_%hE&XKqZxQz=;@NNgfL+w!LH%s^W%SSy`4a-^MtkXwvM=0Wk&$HkNT1>e_tHbia-z)lYsJ z-}o329&SaHv_oqK_+?lC)Xz+~!r;P~mv^UTBdN*1-g4OT4`WDb+U6J?$p>Sur;0}i@ z_O~@(6U0GGFb#^Z={mzpaq*zfP+0yv7J?fk;4pqf%p(Q7 zJ3wC@F5CC{qK~knC|(!5pK+;u`}X1c8d0{YSjLz&%uiFTk0ihPmF&WbE*(!l63`W{ zY}(lW^ZN)$jfv?%cjD z5utmy@(LTLPZXW$K zz04vHqI1<#&sa}OSncgFV+Q|#-*Y)3jxdLNqkmLK+Etz#=xR`R&+!LNm-Aeko2Xq_ zWNW9`xRJ)H8*Yb!CMUl-+jwcGh~(`$lg(ScsWo|_9c{E#cY5jker;sHfTIG!H!?U- zI4w3aqN}@3w}3jWbxjg2!{ap-_%nsiFLvQW_nxPD_|PGNEa?WSiW^jIkNMQT?{M{T zwzG{sF79*agWZqIpE}LsX6Yni69_ZOrboIac`OLyxg#m!lW-M-6?Ywwl=>;I*O1nk zE7KiLUa@L%uIB{%ii1!8oT_a1tf-I9=P%c*d)3`+Xl7=X2|##y*jwTBP!5PYbx(lr zZz4{OSJt92xDfB*S2B`d2mZti^E#XC7AjlMf8AD6d2!A_QBSpRNJ7fdmzv_$bd+9G!gXVi?EX@`2JU zt~dr5_jf4`7jzr{NUy&F_oFE8=#*%Rd|AOjnuLHeXKw2#kI2;g8`1VHm6v`zwM@EF z{sWD6@jYrDs(m=PE<4sTyWP|(&BGy{-6FbOp{SWh#{xQz$PAduB=zy*rW-d_o!_78 z(u7Nt&AhE1r`KQ;Pvurx0NUsjuh_3gkFBuSCBy@cNqS8;U;_g05H!^1W=HrvYO7u0 z;nyjrW;-r)P~E-{2NL7{@6WLrk6kbK2)G*=19NE>Q?q%hE|?TQ*!|0Ag*Ih_ zJFaKi{rIOXi^$OC;M6$u>ED-$R?(oGv)e(@CknG6lSsh9 zUHR!H2k$$34=?*0&n8*mFqy+F!`jz_=*HW^b2^$P#5n3U9j{`4L;bZ(ZIB`o7UxO z7EVUHiS`1q;cD2!@yL!_D&Ck!bVX+;)vP zZbI@R@EU}K7~b7iSY@cUY&jBrMR+YiGlyk430nKry{x+^k4X9(nY?i&Z|y$#p)gyW z%=rdgj~trqnAl%oL4$Iq%=7ws?v1qcW82RD@GL@a`Yh$y8jY1skDN6jBdT>ktY)I>5Wwn;(Vzt-xW05d_lSg?QMu)TNJ 
z;)tguIIytcz6p_k^~PufmKa1|O?s5IYyRY~=MQ-v^!(<8%_&iqE-;i9Wk+{n{28dT z=2-S8L|(}m8M+yZ;*xvc*mW3?R?;WKGDX>%P+PN#mhh;H)z1~>nNe9@u5L+PE^qE} zZu9U-ldMzUoVK{b56rZY8r$?G5idfmFYohu{P^*h1(wkRXnkMn;&P`~K-#+83-3}^ zKX!hmuxL_Qt!|h>(ctNu+)F= z+a~Dby0hl$zeESdy9UnH6BNWY$1I-pWM*{p$Dal&`%KQLS*sHu3uY=+CEJx9V?rjP6-XvKX#|K7S-k* zco7uBve_Yw+1{Qyb~*uZ7A7Wk4PHu~-~IB+PL|tK-Je^8rc>ioy!4Wf*e9;N`E$*K zM*}ZixUi>K!4WtF3?URmA4;(e^Qee^v8%t~jLv}~$g-KsZcQ4ui0rQ{y6|u}aOu7z zHBrelx;UrZW22)JTs=hm4qOs`(!%f@Cl+j@ACWpD{UAZWaLpeB1_;@pBfAR)x1`l( z`PF^6%F|4^_U8^Fx93-*fvFesD2KNbmB1aWy$sm0W@AAoQ`PNCT+%#y6Lgxe>q@ED zFC4*#L&a*~^kMt6D)rEfSzo^GdZ@l!6Dh9e&Fl# z;V;_@70tcMeWAmZVMZ@1FTNbY=xocJ9o%T6BA-~tdo2^B5?F%=3!kxh32PoA+{KcT zIcL*|&T}>9Rgy33Mc0H1G4L)wzetC;N0}WgEtg%gKle1i8&nV`HG6)}FBC^mA{k{X z8jTzy3uQHS&sMdK<*0ZyvhE^|H~FD@(2Ejp}I5wVk)pf6Lj7 z&ScHwJIoxvBRgkw)U(++7wYus64viSs@{R1BVE>g8(MVHF6v>cMg7_|wrP1L7#HhI z`}6P25L*6-|8D#E!Q+q1<9=Q>k9?w0f0X-Bmr2d{b^Pe#Klo^?fY1dg-PG1HChje^ z8bqN-cxW-+mllvF#-s4DFAr4qTwJp6Ln#QXyHn_UUVo8cqt}xRKQfg>#6%4xW~dZm z{WSYpNb-(;d<$K`kLQQo$E-g{)b0Lz2 zbJQUz1DgaSPCa4ppfjc-s2A@ZFZ-EKFz$e$!@)FU|9hhyQxZppq8NsK zM(6jfyE!@0oGb?>Mz~Sc7A=wiF~xWf?QttRd&-Nx+S+#um2M@Pp$V49fy{#lx$;h5 z!k!fMi?VA{*}Tiv>2WHHJq#>Xtzt$Qy}M*7Se7s${A_nnkriI%nbPOi-Wc*JXHxL% z%K^t$mB;0;47wE?dFu6Lo-~P!Xyk;6FWUXw3|m89 zfT{wKJ%8`Vx|icZ7V5XGIKIrpXx28LGtbld_jqX7eCFZT*sxMpZ>pMvnX#yr%WBmE zeItUUV`F1~K2J{e1Jl%BTjhmP2M&)#GtCLj<<99oS8;f+#CD1|$DtP%h|soPe_Ca3 ze^x7`fJBpO9KjtU0TN``N#>^bqJYvh$U;E&#K$fUN2D&`N2Xz!V$uMd?}6thb5+Hn zhyPIAt=`tGIpOl~pqtbDuoFG5&h0gp4JA(^9!xiMGgU1O{r5FeJvw1DAZx@&Jpyi^ zRV_>}e?E5bb;9dITQ=`=bG7)?sCDk!oq55itvanPsY4r=0%Onp-)`u)ckc*Wi?&`? z^LfYEXJvzTEqQ*gL*kC!#ozn9vYhkjT($T~F?z;lWnaB;K|trYyMq?L*tl)m*z@Ou z>M1pLLceI!A@pA|Iu$$0BjMD@+TnfO-}l|^+s!&xUHzXH;5JZ{>2a$!*vcq7^7az= z09Sr^H0BPk*G5l| z!)F1`c3iolJa`6?@M_CJB_Pme&z-vovTa>;S!-};XNsMPU8Wnk6TMCaOtPMyeAZSc z;-d-Rm-j!9({>d5v~Mt_D!uym(){adHw+u=|8~AwyYCMIer4VR zR^I;_9~7Gp^sfUj1?a7ty;j^_xStS{PbhxZc$&r#oUasU=+ONxO}d(@I%d%FQki(v zLamCzp;s41{C%WkE%5%9ct0w4a8m7*5{-I?J?w35OqMG~JbI)sz1OL{%D5YB<2k); z)C9xf%`(2e*lu^U;rgoDK%<4vf~X^95p|Fp`{JooJB7-eVxGlw;_L4qD@D8qXmKm- zXI_CKH7|S3qbHRt_~&!tpWsJ6#w`iSc=oNyil#q`Ud6>7Hdn44k1&(%m2Lc4d^7JGcn zchha#TJiYOe}{>>wx}$NYLWEqIk?gEYs>g}waeYo4OxZrA;|Yo;1ct74+9zuIyK{@ zk@fwH#$zp0l2d|xd@nEk{A9hvptBZ)#J-wL2M?neAu|RyQK~0>07EAf^HPFEMUyzl ztv$b21l3>m3SgOH4u}H;Q2=EnM}}xjYPf@h=W+Ug%38E(qg+B7p`WI4C@(gsQoz~* z@n+bZHb^_@jvxk+4c&QS)B^RUhDP-*4T`3{*HzX`*nXh9cjDEIb{R!E$Bx0EO5_k8 zc3LqBD8?6_>+R!=U?6q|}vhSOGo)p_9)OKZr@0vaz^b8%RtiAK%)%M(HuTMRX zY9+b01 z5(mk*m(&h`_*v+%Y2U?EQ%Z}bo;HCP@&ob?GVLNyuK=?$M5x9r`rz_w4&?kNC!3%R zlHjZ2>ouW2f+r+pbXeEKF2Z7e>EK}*-vjSQ88#A^8k#@2*k;|kM-B>k|MJ(`yG_nF z8`ogGowoMa-7$K@Ock5ghSAfPHdfDHwCHEwCK`I9&NORB;?RW12hW~$D0K{~Bkl&m z7sY9Ys0lVA78*nr6(gTT9+VN=*=EArd%N||_$p5uAIz;??st>EchSEy&-x+{BRn=~ zus{CA9z|t;jAP$9KEpdqAo^T>?TY>nSC^k?JZeJ{d`alj>gcr(eF|IXyWiZLJn)lZ z8Or*sE43-?p*m3D1@Nl`e-tUO%WgWUcGf6*bFd%Glo~iEZ@|>OBHzeKSI+w&9O8Cfgc0UqptH1dd z{Ql?WoyHT7&rj<+WuLo7EchOFw{J`B);-8^&>6D71o1YjGTR*u9J=cEq^M{64j$YL z9L)TwfYVG7iVzS=c6U+%h1=!ROTl}mpRo%1 z^1!u{3JuTM9qF~c4Z%A!@I;LTd(1A6pPRU6ay0kp1jwzAm9J(_!C)t+8c&G;=h(3F z53iD_fftuYMn1s|^BXwhG=VhkRRdL*78zC5md*39Ft*s_x8~xy6N6oo(H|i)XhsdV z9~gHxB}Gym?(V#wrp2OKk$B*)6fXwI!8Nzct%W0NZ=bPtQL~Ha;W#KNcgoqIU@+6~ zfcSN$Sw3N1qA6!b*nHrgy_s1npm%oik?}m749 z&x#Kt_QsWcr62n9vOhWV?bwv!x?w*@$SF3-PvX42Zt-clhB1m zZ0?fdntgse2@)YMqt4&kySjJd8MC!jrt6=5ea0#0)S{XRf0`f!9(ks=0B!zhhivD7aAYhmG5MwHB{U~4-&qP)*3T!!;Qc|fzd zUn}CA3u6vyjgC5zbN=~+vIftVMd#Pp<%7t+?fe&!txvk}$26n#`!xep#>zvuHb`-U zc9f%k-~RhkZgy7Kq_g$V{v;>>$*zkU<$Lu{bUGqcrVR&k=W8z!?w;F;7F1j>R=2 
zU5rEDdpS83GbS1?S=LT35UWdJb@DvLQ~@5ioVT)@yD-kGyWsb_6cwX z-{UTAt-PY)*^QKy_t?2&g3)JBH3NX{n|oziQPp;qNw*hiTyG3dpF;K$F4O$D zS*=Ph_Uq|&=;5)<{i}PQwNg^$cI1V7Llqd{x~L!P*m2*aF%y%a^l0JDeAs%)wuKUY z7-+~+nYLnuQQBbLLGz}gg!WIplH3xse{6Q)sSwl%hd@8?tqz)FnpNb1rfdh)RD#D#EIC3TKp=iN71tRNGA_@c#&^ZqfY{eeWhbWw;z)R3mI#CF}d z9;-``6IeS*Jf%y@0ExlQn>J~u<)4D~S82&IK@F7KH~=(j#DUMgk}i1$SUDT(It znM?FKCs86C6j1nUSIbU2|4xCX-UHfrXa2r%fCzX#=-=enRKTJxAG)Mp8<8^0wMeWGXrdAy zmzs*%`NcPH-t_M{ed^SH{8Xzyc^q1k9I9WB^%n=ZO#n!4`!@=ysz}+X_DRQf$p$aO!XpHex1vwE(~R2xNA$GU zSM;L0f*O94pRZka>yo{E-Aw<@g@@`HU;AciKFm5DJJ$(7yDL6rK7A}`=^=aj-*lXaE)YS%o`W4(da^)cFO)6SfB%&w_UGCX=K1g5UBFvk=wY;={wBf}foizD#{5r4QqZB} z>mUA$jsH2Nq_sXSIryUg50?R3HI+9V`S11q$8GXXSIxSq<37NNd#~K@<2$8E$Aoc? z8l#uA?Tr_UG@L(|%NJ&z*koPL&6j?A4am8gmrU5VcJ!W?p;GY)uAE9ZK6+vSe+R2ELO9Lx#F@?&7oASDc+zvp!lcn| z17;?KX~xgN-LM)3DOb4cI0D|30?fWmC)A-?+c)<$yxugh=cLWNdCQ2xJMVZ?$Dg>N z*6`(FjZ=bO?`^anp#W96W!QEhc>wme(?6DU`uA2elQt^()h5HlR#$e0@!7W<`lxBg zc`NU&{i9o%KmefZH$b_v3Fnh6d*{XFWDlD1L?gBMP-T4M6kt)c$7kKyU|bHjFox69XoMiU`*qcpPzRgP%9}Y2r#tRgrV51(HQf2 zU%PoQ;i^PdXLkv@GHAVrf7A7;DIN$B6pq0oep~M9S7HwsgpLi3{D-!K+B6o0p7a+l zqJd*qm%m<_et1A5`f{$Q%RRtW>Z5PwZ$K5gIM%oCfZVk7&GsX(hX0_w zENcwEh(!VA9tww5XyUs8wUF&2!7%pcv<(anpu;3aihm>4^sEE~3!7rzR3`?+1r5~* zvi9E9$n0L}oFsal>DSHW@2DGUV|{Msa=u9tKwJ2cEcx#xBh6 zdwAetFvc0QoF=DjErPazk+M;SMG&pRvYXJT-T0Q!>%w2J8rw&+=EOO`*xb0Z(HrcC zF8>F5ClPZ1T;RI=0I3C{Bw3aGstP;%s>T+1Tz8n0{L=d2tP|Gedg~qBzSsKOZakr- z)4%%i-TKEbJo!{+KGz66%vkUP7P+~QMG<$U94iDA| z>##{~<@mosu#H;BdpNgm74h|XblU%i07I6T2zRrdE9mXFZy9h#yWuWH5d%1=K+`&V z@#68DRO%m@WlQ%eQbyEWY=2M?iH~l=Z$HQZG|t;&W8Fh4*^kn$&$Fh)knz%q)4p9h zagsNx@~U|d6T9oj4ec{l7t&tZs|Jm}a3TKOogs((HRD6zkh$%~&`<$S;$u7hq-dvG zziEUA!Jia;3#Aky(!wi)VHy);PgxSO{_HqUR=O%-^y$2!hi)-`mNEABhpl5{g3-?O zm~=9|*WB;8Vcx!fe;aIMvt5+gX@jON&g(ky8Vvb6hrAsCZ3vBW@q*X)I}a)87h+@g z#{_H~G(}Bs*LItgp94u{7i!|`*P2)RYZh@qM{u6Z=YY_-0GnscK~NJmZvaRGFPi?$ zf-0Tmo)#0>?#pM*1ClfeN{^X{HLOphSk81on?vB63WF1|^l7?@RU;f6UcC&yUecJm zLd$7t%J}6`3vBB(za^5Q%rOzEm;h$sF!{@(E-jd5X4u9Ho_2tsz{9xL)}3R%xYesNyI#zN6*{bZghXy`=6C5nvHg zkRg$}0|(lv#3ZEhZ>0xDha;ho<^?&lX^e+-exruYHubXn@AITW;9R|rjt@nh3yiF{ zTWi4lDKppHVOVJ7wYk>ue+REQMFae-<{!rlD6nrUR-ab8{uKm-ZMf zZPwBFdEq-r(4aG#gQbf;PZsq8cP{z1K4ezEHd0%r0+*v&MSsCbxi1(x`4b`SM%l7J z1LPApA=d4+(do`Ru~%}&yY=ZazW7!j<>Vc6vklX8HhH7vvaY?L92*mZj(ivGd)t6h zNc^>!l#wi1we6Lbq{+w1Od)nMh1$xzS_v#61Qv~{pc4}!M*>sq*|VNfB!mqI4D$!&rBn~R_&jb8=jL1nIjH8v!A0=k6$`jwU1r;*6gq2%*{MOWEd zMCeV~wm%+Oq5>W&Znwtkk%D4jJs7f^^kIOWO{E29sx^K7djhCxXrJva@B|SY^m!Ip zfde#%%_TgxHTU$Z%5^?#jIKTT^|_(0@^X58bJG;MBpXDC3>YfTa}OZQJPeR&6=l~) zRtnPUbLY$IdIsjsfe&PP27n8A;YNHLzDyFWQkR+9l`Mt-mz_IURF;t-0c(mc5;qIBRAL>N3fHe!U{>ktw`D|v z$33j!`&>2WQs&wM#G-Ys7$5oDoKl4wriyKN9-^cX5B^sO|upD z(bMn?rw_B$1zrlR>S0JEa(2Fg*Esg1C|uzk-C7%>Y4}=$UMwL?4)<8rd{l<+k_rQN zNxE1@-G@`vOZldE`IWb`^$^*EvGe^Jh4UE%Q1lOj@%RE}XvS z$)hJggZ|e?ux@dA)$e8W3`l5DP>wxQBpYW-b6i5D$>q>o)Nt(>*kk<3@jac3os8C8 zZ44g6ofqDS%%m~He!rdr06?(1hAh*WO^?e^cd}vl;^@j5+w1KR;L4~2FvgwlT zcaUrGs6#@IO|(=%1Yl!+g!ThB?QED)%x|>G(C!i`%ETNW7S>{U%*BhQkxw$w=o>${ zBKpt5$DPq_ZDbl(RvLP5LuE(v0U>pM{_*Jes=P;wIy|aTKnD<6iWSuyxY0C(7nwErm7X%i=@ zZmjCOHWr}S5iL`}Z&WZO6Wq?qvN`i@IdlMVX(W;h(nQ z_BS7it>n_venv)z0Y%sd(_lTb2z=_vudTcM&WRXua9;k@x)bj;p0#{TSJbWCVqrVh zUyb;mu-=P@?|(ST#BSbr&l1&`gNJ4RIQKgD=e$c_818Z(ZEQ3Z6#zQ{z`|9T;%lmY zOa!le$ZWX&?3?d!G3<+=k6Pfp63v4s;;4eN5m_)F*>Gf5diNO7nGR|fJ`V&nE=V$L zG&|#pWhxKgM0|AJC#xQT07sFRDK207Nt1sb4q-gtjs(5$dFf%dnjJv5k7%Hauq7hV z7~sP&=ObZS{J+e^b?XxT6!&W0IBlf)yq^pGTMwlDna6Gm2E(!IviV9yWWA9mX`U0F z@sHiw$ta>s(32Jxrm{-%8VQx>@Ae`dgP@r=Z{L2`-qJ-m*}bZAxIbQsQVA;s+D<~| 
z!UtP-+JjhtsnpgpS0^#bNU$Dt>LhYD?xL~|8B@Kckj9DKq;AYww;0{+yqKfx?O#}y zPeCVQ(3Zwuc2CfAF%A)8($KlSkF>s5-M(q}N_zFm!TtovGIz=M$w^os#h=)3^9V3!ndK0q!(Aaevz4{+&C&4Y;mZ5Xp3!nB@s+8h5vDM##I*=g);3 zd+(flI_pyLkgYGyz4%bB)@T%oUJNi2Wkd(tk`SyuV-4CgmRwCpTz18i2-lOalCVqP z0HA{q@>4F}y}i8!oM~Y8r^{-05?wdeo;On2JD6fBEVY#yZ%5@BAjH#A^fbAJB#eY>R3OUeW0IT^0r5PQ-!~SvR9vs+yOI4XWbdXri zMJ0!O(dnlV@7F;4(kW9<+v?zz5_^tUoA#r!itl8(zj(Zg{#HwWK&pH1SqUT~AleN) z9HTbR*Ja|es(Y8`KMPU_H{Mn7d-uXR+nrVWyEL#}&zZ8KzF_5H3-L9BwMc%7TuK2$ z4;(!5$-_u+d44hAZSw)2p(!M)gjxk7aYkX6xmPwXgfx#4ZI2TzMp8pveX@aa=q(HEmvxduWe$_ddMxd4a=Ed6^(n**_qCouz3IndGP2jsu|_H80pD&aN; zu5~>j4G=cctM4n$RX2auUb}^w!R`4Pu?}vg1Ga2~Qq(OVlmeI1?^pS=#LY_xiNxKd z&D+gZ2v&FS^2&g&)g{)h3;#j|e<&>#@;FdLnevJLkrx2Fi~{b?srC(75Gu}F{uvd! zoq)kai1(eD;L!Eae#PUbngi;qZglx~uJkX?_&Aqew=uyebkEJ%$LSc|y>n;k_H7Am zyEK3ArE{oOBy4kqZ{MmL2w#c1NT&MsV22skFPe@R;-caAJNKD23~C!O=%SZbn((3X zV}%l_NlpNg|bw?DHWWDA)7BgRSPV>7#91o%#yy>LZ&qX>#Z3 z(*tY~vWlD6r{g30?XRvV>cs^|&e^PBm6Wa+OQf-LL-(~qZY+#aGfqD_*=4zB_CH!0 zCabSZm<$zsbXn{|jlP7Qa)MF2bX#0WSCWY&CG2b_x7jk@u(13(Tx6iCBNdy~j-(^Y zp`iq)({=DO;(8U{NG9!=7!i(2Gf5TAV$n2}@s!o^x^w{Jjtumt1AW3Dt83iitv4;G z_UpzsaQV^@$#0M135k!4VSg97EzFjl1kyuY7y&wUaUs+3?xOupM0kBZJ7L*-C34)j zkwopena-1`?Jj^(kg<2nrPthZ;GdwE?l4E98YbLhe&&h#G@b~0+jr<7oRPHHbT0LO z)x_@tG;H?OWyP$4gJL5O-%fETYX)6|LN03PH#zBS@p!;-mN#TG^_GNhh!{UsNM6&$)+DtQJCTke1?&~#i+&T(6C7p*?37G z$Z&>!76GW~^-n<^Y^iR}+?BFNss5V5$wMR>l+Id082I!2_b<8b+>)0TCNwB$ zT2dtU#0HU{fMgLsg#!Vs}5C1ZNB@%UaYC(JFc=W6;-OLejzu*x{JqAd(Wa6`N8Gf6gfk+T5G^V{>c_C%!&(fVxM>fw1{sMa(b*tdY4oj#5BzQ2 z!gv8t7`qdBcHN^$oaww3`3&XG^icps;8x*2-h3NoEz5_tF$<%Oh77UH*_bn2pF?>9 zwocaPna_LxZwW}F%CcS=(;7E6_>-+^HK>~`I<*$cl5Coa=Lci)jnI{-h zI8%9Lwz)=hX5xt=suuUeY_9Fqn7_C^vwVBtJCe5EV)$^pu1gRkHp9ao8 zy}Pi0&_0BA_^Z5evxY6NMw{&!`h|G{%AwT?t6l5`+3 zX3Ie*$o`+m4FW3KMPGmarAwC_EDsF<(BnE0Hr@W5I%PmIY1q6*O%byMv}2uV16s{+ zaF7LZC@8-b_uEwPUz+wj+GaqONmt!wVC?`n=P9}~K*Fm?I65fcRojloac+nmWA)F> z(KeH!s6HY}kt|KX9QKDdrchQkT5{-P*=4HXuL(zX-xR|tF*sv*^-u!|ybeQjm=f@w z2z8cEiL8faUSFl7cK7ac7z4SM4Lu4~+xjc<3xH*~{rp3(fKxQN(EP*jvUpABUroy5 zX~{Do%dt~%zrDJ-sx`@-9BBHegKoxjJ@$XrJyM8Ba(Y86!dB;TDs{zX`{m_5$Rlza zfOxhw9%bsaBV|dTrt*l6oI^jl!R2udDu1_>0vt$0)3BV{y8nVvXAH%D zHO|6fyT`@$?b~F>!M!k#w{Ts4V!FPSa6{klJpWtgNB!p}pGoX(8W+`^<#N zL9Bl&HU8da9Oz^M9gveA65BF=Dd3kwCn?ak%th>yOlviGI4?LLzT~*m;}J_GTJ-hS ztv`MKk$ttNzJ5!8^ZfAIZ2%EyQgA3s-*BvzI>eUt|ivPick#HgcWq=RLmTZtR zWx5GIaz@63V3f0#EXiO}lfuSH6Z8jkVOY)YIYl3a6TN5DKM(N(sN-yaY#5b#R1OSM zm3F(j=rSWCqtIi=P}q%X?3+g?1TL)PKGYcc&!@7iZ=)QOPMy%a@#yW1;^g4h2fDh< z>*f4REB4Z*^Z?lDB{QjB-Re8Mdi^>lab2R?Qx=7XbN@2e32DowUWAA=5zn8ck4dC;8$NQ+@iTvTHPC;%kQlnGONB?BR`+7pGx-)9RdS#Fh@VbJFlQg z!~6R6SNn$ufoW+O)U{jA1AYqNO-zoNj3$j4Wg zBsGxH?eWcf)TfLo*VL31qnmctR8g#J+En6zP-6f!gU1952rX)F9otjup^v!X0RrXj zV7TBUqEqFw83#&-?*)^BxI)N5tW%g}jtXYRfk?agfK-n(_4=^^Mpo9on}yoryvN}J z;khf!&rliqPTBZmW!{>56xEH0Ob|kd{6(aVkm9_hK|@xaKcG`~24UbF`b<%0G2}Sm zvE?n|^=W{_M@c*tV2})DclfNun#I7QsHv&Bo{!9imx`k)4Fe^CQB)#kxtVtKJ~Nh zK|x_%UC8e4VU>i4uxDSeA4exMoKrr z?>2khJXvZDj`SA%8f>M6j3bg;O=*kR9NgBmTWsw$Gp6}kXjuqXoF-id=7GXBHy)#MrO`u?}!fg$Xm{o?&BqstjUz` zw)YDc*1t$MI@XEOUMg@1#o&S1(#rnq{b>ph*oHt1A0M4c;rS$VNYP)~tF&0)2*b?a z9G<6NJ$p9e2-qQ?z?eDzxL(eR&tM@3qb`rT6^xIE-_OflZU}j@e;zr|d+~^t6I-4c z(8BtDUDH{e6-W7+Xr}toty47+ZH;Z=!w>$P(ku=o&ac!3VA&_w`G`D7=VNbdH}qzF zJO=(fNgMlc`2w0#xGiZ2^RABxrudFs(1r3!=Vh;#nC%W}jScCCzh-Y`!=dX!x<(tg zlIuas+ijUUcd581$Rb%z;sAZ-Ju1{8bTO04tFs zS=w-^aWGA2r?J<=4?JeiDSJTM^m9^R#1S}v=>?=X4Y|t?!+p1ZDGptEz-1e z=f)x^i#)lMy-EE^xuZ#L`m z^NYO+a0hq<*A}1xN6#q4*znX;J_$&@a5cx*)X zPOB?foyem`GDU7BwUtF~VA&=sn@C5GkVU~mYa-lv95f^>h9eYh;19}bB`9%ogKAv~WRnzZOY&h;bxA4k%??b*B2ahO>7^SAH((rGc 
zPikpA&DxDuk9|KA?4Mz;#!tL|_pm?8D8AtxWc>%;_u^GJ+-dVdptr9W*mm;743D-GEUi_Ygcgy#!yP4OM4y{dR^*&H8rM&*rB z>yi_i8(gTWRZjV^^!cC>N;QU8DP<~bP$Lo zHUj`hI(B)T>DZl!+Tx^&R-Mb)#{Q4|P*_}Lz7k|weOLlHM~uu~vZL%1vA0P1R1!kSg|`F)4hqWoqk z=-a9th6)Y3NZwM(>lNeEg61bUhvbwgowKvNqu2LUPINb?_sc8}UU@qwW3$;ZAHTXX zFBGk0GiXEP2~hA45wPy2(jiF{IEM?|{m01(C5B7O6((g&=r-f%gp}*bi>|+1s$JUH z^P=at_C|XtcPz2p?4b2-NBgJ?Y)oWT#_vs0#=!TopOuMbGm|7ehzpG`c$p+Va`#ED z6{AVT*O3T3h)!9MyOafkr+rpfb3G+7&XT5@gjkMtl^&w8W}jr_N@E@W`E#Yp_zG1j zZS!}R$Icor>Ydh){r?yhCS@BNXBSoS8{I0y)p z15l6_Zc=hmJ|m$m7Cbc_e?uBZ6z^{ ztbN^9zEd1}rlve494unv_2Q6P$7XTOq8I&0!BjkSnD2!iSD8d zr+3SGk1b+M%`q+@%z9()topeaW-Mz!@9t2l+MFbENb@UhCVc61`(yP*kELHmTLw<- zG0Vng)aLklY*G_t2WVEw&!5ip9ME<>vy?p+L>rpLH81V@+4bs z*U!3tv?1_telV!a*or z^2K)O_R;3;OYhWw>VQipoX_n`$=h8Jx~lfVsdO#)>#e-A0A%mi)mDc7tSr~Mm$rju znqTBD2k=K(Nxj!2@+Bt=J=C?e)kfrT%F0UwS-aXBCi5O1(s$Wq8VZ!Vl-LW-Q_a)t zb39Jy@AV>&g+r&z!4nN;Tq_`@S83Shw*KbVf8~#xWf%2qHRO&`&-6Q{CL3P`9ATJZ z6cATcXP9S1Htu{)E@B9kdJ3<-i(8+f$zNopru}i2lK0-3H%v6Wyf?1z;ni7u_YiEX z>J=Tii>%!a(%*t&MJ9Rx5iYTJK(Oz?d+um?IBI%1vWF9dDsbni0d4(FJF9VF4y*jE zzo33Aj|JBSg!}Wsp}rD%iZ#=N?gqM-+D}af`(wPYh(0&0u63t7Ef64b+f5@Jqw4__ zg#A%VjvaS@VYeHnjQ6r=_7QV{!qIDIckinL3Xp&&%pnbqF*>~ZwL59$66HZ|Gd_BL zUenQF{Ox-#^(c|`yxbf8>^j(!bk^;_xB2_-o zCc`Q@2~1ykCMHj(ls#g-Wsu96;Es7WJD<{D5&EEo_I2ek%{5AYxxcwpN3xsgWhI7z zSJ%eidI-xgM42>H7!0Z}^N@h?Xh4AE>eo!~vG4WT-3PX4gu9%ys7@(3{aY*Wf#?-1 z5B8n~lPW1bkV(RsaZTWQQgot1$rMkye0sab?D9La%Y0jP%Y9;2vLN_W;$!h=xf++8At1nGrKlh8|cxJTMGd{i+#m#7z7E7^xyfvZ#GeLQi=ry+7` zDalf@_o1Gy>EkttAv;ad)6(>Mc-lP`6&?LKj@4wSc-c{sxB5pD9{fo(#S*b7VPnuj zZGY?Do(poTRDPhF<&k6l4~EwEoMvx3TU}XY$2<3K&Eu^>D(wE;fX+mF)N(zo+B;&V zFxWBON!U0N1yU^bpJ;o=q+~?!&{YQkFgBd%)Ye~n%BDfE2>0vtH#D4fCf5Pql|$66 zCHfT+hvF^J0Dv|fusadD(}YQ`d-LYC4JU$W@dj$V>cKy00JbLW4|W!&cb%B6k3o%> zk=##G@iwpubf)s%@dH>P`gs&G^Z_dJd*3Xt&Qbs5$9~qu$*mTi`kxlSb6{pOtu{mc ztg<`r*!hcP)U$+xEtGxbQ4{I_qxO+Sq$g+^X`v zG-XJF8!@IRK(pSaMVSCkFxc~R8wMa$Qw*FNm~UmzIJYrnU^CTAl5p=&oRnx4?(Mu? zN59qhD~B^@1Uk(-yQBTOBf}=b#0njVnP*JT3(VG$y!TJfPWus5{q0rU$W1D(x@P2^ zcnAK7K&Z)jFxL_^l`%Micni!3-M{`>Nc zcI_l6J8;wc8@wroclz$>f_a!9LgWa1UX1>Ik*(Nl6u;8~C`r-QaIkF>gEzfrJ>^8F zG;7b`*JD>~QL?$Q_hPViNOW$3nWk!?vYEZ#}juBt3ex3yN$H;ck1^)OehU+@Wasz$?A1 za_zX!al0Bd_&O;n=&Saab9RYA)>}L7UlFp!@X4WodDlIifCmornSKzRSG^LbeGovy z6~zbCCYPR9ZQ1fRSEOk9&_WFva$|KD)iS6^0ka4n*c(|}2>C%0oM11!E;qBjAMCw& zU*xqeS|f8E86z!#UufG2>5+Xja5T!d_`tAs+`4_c-s3)sGy?Cwd>Kz`*+fORkHaE}-jF8zXl?PA&I-1TfVfg?JX z{(5y&#nBMEb91hpxEG*=J#6- z!JZ2hEoFl%nCVO~D9%4O5K@x|-1pKSA>i0wwGuDlM0(x*cF}j@)^yK0p`|&X{;hgeDc((hfGzbK4N*zG^LiVk6%AWCHMYx&n>#QKW$x$K^-GT3zTs{ zbrEAD651|hjS?vl7@#VSAbZi=6cu1Y!h68xqwntW}iaD`)G`u zI^G@q9(5|B&`K8Q^|%ERB9n>(!bE96B_H;UsR^`^}d^gR|Y%c&vD`^34xcb-EF{uN!ds@np4YzbMoEGD19Vb^;SJi1(MGb%UY`9_Nr+4=&?(JuaMgg-o>T&A5$sWyD zNTaMhKxv5kkc7oG=bqMR-MjAowNApSkz>3MncmORHu&yw(mw1}-P!DY6LB<;jVVJi z^29nqBLw}_h^EZr^YguwmPMe;d_84!>iHc9g>kJ;+V&U{;JUT=^S?lxX#(47KcCKshL!=w zevc^mb?T78gH=1$%rvscDVtE_iz-h9mI|`>A!D(kU$%clM8t<)4X)d&PMnw#IpG!w zP;A(mdVXv2Et@M2pMTZwW?XqJRQt%hg@smOS^9TIK0eo$;dN;CS~7s*&~>AIjMHKX zmezdEMc>YvglKZy$K=ILdAR@dYeU%xblNuI=b3VXJ0CJ4N;}AXCz25xl^-2I|i6DijI~ksDZ~IJuo@?fOqWYm^{DlqUGDSaJ@raO9JVJ#XNB_ zs+OI*VY+MmgXOiMxkvO%?9whX^9_0BN=@;-Lg-KXHq~v=M z5W@m&u~ZDrC*=FzwlJ%$<^lMcS{tABJ&!`vqe4zy1?KhU$^8vElXujR#iw~TWLSu(1U z3x~+DgO{E+ee?ePNDM0kTKsYEun+`KDf5hij{o*Mf$E7d8G8VR{Z&=#>!vXJ{#E#q z%$@XbI(in-pm1Spvo4RD{xQmbt@fu)tbTtOWE#3+(BYqpZl@XtI3xu>Zm@cG?dNvh zRBw#%2O@9f()$@d@nDKX!GF|t>ZlAB5Rc{F4)soN9YLMR%KOvs3Tz89-8W#w{GwA! 
[GIT binary patch omitted: base85-encoded binary payload, not reproducible as text]

diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/requirements.txt b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/requirements.txt
new file mode 100644
index 000000000..4631eb7dd
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/requirements.txt
@@ -0,0 +1,4 @@
+jieba
+sklearn
+matplotlib
+scipy
\ No newline at end of file
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/stop_words.txt b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/stop_words.txt
new file mode 100644
index 000000000..87cf4baaa
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/stop_words.txt
@@ -0,0 +1,1893 @@
+"
+
+#
+$
+%
+&
+'
+(
+)
+*
++
+,
+-
+--
+.
+..
+...
+......
+...................
+./
+.一
+.数
+.日
+/
+//
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+:
+://
+::
+;
+<
+=
+>
+>>
+?
+@
+A
+Lex
+[
+\
+]
+^
+_
+`
+exp
+sub
+sup
+|
+}
+~
+~~~~
+·
+×
+×××
+Δ
+Ψ
+γ
+μ
+φ
+φ.
+В
+—
+——
+———
+‘
+’
+’‘
+“
+”
+”,
+…
+……
+…………………………………………………③
+′∈
+′|
+℃
+Ⅲ
+↑
+→
+∈[
+∪φ∈
+≈
+①
+②
+②c
+③
+③]
+④
+⑤
+⑥
+⑦
+⑧
+⑨
+⑩
+──
+■
+▲
+　
+、
+。
+〈
+〉
+《
+》
+》),
+」
+『
+』
+【
+】
+〔
+〕
+〕〔
+㈧
+一
+一.
+一一 +一下 +一个 +一些 +一何 +一切 +一则 +一则通过 +一天 +一定 +一方面 +一旦 +一时 +一来 +一样 +一次 +一片 +一番 +一直 +一致 +一般 +一起 +一转眼 +一边 +一面 +七 +万一 +三 +三天两头 +三番两次 +三番五次 +上 +上下 +上升 +上去 +上来 +上述 +上面 +下 +下列 +下去 +下来 +下面 +不 +不一 +不下 +不久 +不了 +不亦乐乎 +不仅 +不仅...而且 +不仅仅 +不仅仅是 +不会 +不但 +不但...而且 +不光 +不免 +不再 +不力 +不单 +不变 +不只 +不可 +不可开交 +不可抗拒 +不同 +不外 +不外乎 +不够 +不大 +不如 +不妨 +不定 +不对 +不少 +不尽 +不尽然 +不巧 +不已 +不常 +不得 +不得不 +不得了 +不得已 +不必 +不怎么 +不怕 +不惟 +不成 +不拘 +不择手段 +不敢 +不料 +不断 +不日 +不时 +不是 +不曾 +不止 +不止一次 +不比 +不消 +不满 +不然 +不然的话 +不特 +不独 +不由得 +不知不觉 +不管 +不管怎样 +不经意 +不胜 +不能 +不能不 +不至于 +不若 +不要 +不论 +不起 +不足 +不过 +不迭 +不问 +不限 +与 +与其 +与其说 +与否 +与此同时 +专门 +且 +且不说 +且说 +两者 +严格 +严重 +个 +个人 +个别 +中小 +中间 +丰富 +串行 +临 +临到 +为 +为主 +为了 +为什么 +为什麽 +为何 +为止 +为此 +为着 +主张 +主要 +举凡 +举行 +乃 +乃至 +乃至于 +么 +之 +之一 +之前 +之后 +之後 +之所以 +之类 +乌乎 +乎 +乒 +乘 +乘势 +乘机 +乘胜 +乘虚 +乘隙 +九 +也 +也好 +也就是说 +也是 +也罢 +了 +了解 +争取 +二 +二来 +二话不说 +二话没说 +于 +于是 +于是乎 +云云 +云尔 +互 +互相 +五 +些 +交口 +亦 +产生 +亲口 +亲手 +亲眼 +亲自 +亲身 +人 +人人 +人们 +人家 +人民 +什么 +什么样 +什麽 +仅 +仅仅 +今 +今后 +今天 +今年 +今後 +介于 +仍 +仍旧 +仍然 +从 +从不 +从严 +从中 +从事 +从今以后 +从优 +从古到今 +从古至今 +从头 +从宽 +从小 +从新 +从无到有 +从早到晚 +从未 +从来 +从此 +从此以后 +从而 +从轻 +从速 +从重 +他 +他人 +他们 +他是 +他的 +代替 +以 +以上 +以下 +以为 +以便 +以免 +以前 +以及 +以后 +以外 +以後 +以故 +以期 +以来 +以至 +以至于 +以致 +们 +任 +任何 +任凭 +任务 +企图 +伙同 +会 +伟大 +传 +传说 +传闻 +似乎 +似的 +但 +但凡 +但愿 +但是 +何 +何乐而不为 +何以 +何况 +何处 +何妨 +何尝 +何必 +何时 +何止 +何苦 +何须 +余外 +作为 +你 +你们 +你是 +你的 +使 +使得 +使用 +例如 +依 +依据 +依照 +依靠 +便 +便于 +促进 +保持 +保管 +保险 +俺 +俺们 +倍加 +倍感 +倒不如 +倒不如说 +倒是 +倘 +倘使 +倘或 +倘然 +倘若 +借 +借以 +借此 +假使 +假如 +假若 +偏偏 +做到 +偶尔 +偶而 +傥然 +像 +儿 +允许 +元/吨 +充其极 +充其量 +充分 +先不先 +先后 +先後 +先生 +光 +光是 +全体 +全力 +全年 +全然 +全身心 +全部 +全都 +全面 +八 +八成 +公然 +六 +兮 +共 +共同 +共总 +关于 +其 +其一 +其中 +其二 +其他 +其余 +其后 +其它 +其实 +其次 +具体 +具体地说 +具体来说 +具体说来 +具有 +兼之 +内 +再 +再其次 +再则 +再有 +再次 +再者 +再者说 +再说 +冒 +冲 +决不 +决定 +决非 +况且 +准备 +凑巧 +凝神 +几 +几乎 +几度 +几时 +几番 +几经 +凡 +凡是 +凭 +凭借 +出 +出于 +出去 +出来 +出现 +分别 +分头 +分期 +分期分批 +切 +切不可 +切切 +切勿 +切莫 +则 +则甚 +刚 +刚好 +刚巧 +刚才 +初 +别 +别人 +别处 +别是 +别的 +别管 +别说 +到 +到了儿 +到处 +到头 +到头来 +到底 +到目前为止 +前后 +前此 +前者 +前进 +前面 +加上 +加之 +加以 +加入 +加强 +动不动 +动辄 +勃然 +匆匆 +十分 +千 +千万 +千万千万 +半 +单 +单单 +单纯 +即 +即令 +即使 +即便 +即刻 +即如 +即将 +即或 +即是说 +即若 +却 +却不 +历 +原来 +去 +又 +又及 +及 +及其 +及时 +及至 +双方 +反之 +反之亦然 +反之则 +反倒 +反倒是 +反应 +反手 +反映 +反而 +反过来 +反过来说 +取得 +取道 +受到 +变成 +古来 +另 +另一个 +另一方面 +另外 +另悉 +另方面 +另行 +只 +只当 +只怕 +只是 +只有 +只消 +只要 +只限 +叫 +叫做 +召开 +叮咚 +叮当 +可 +可以 +可好 +可是 +可能 +可见 +各 +各个 +各人 +各位 +各地 +各式 +各种 +各级 +各自 +合理 +同 +同一 +同时 +同样 +后 +后来 +后者 +后面 +向 +向使 +向着 +吓 +吗 +否则 +吧 +吧哒 +吱 +呀 +呃 +呆呆地 +呐 +呕 +呗 +呜 +呜呼 +呢 +周围 +呵 +呵呵 +呸 +呼哧 +呼啦 +咋 +和 +咚 +咦 +咧 +咱 +咱们 +咳 +哇 +哈 +哈哈 +哉 +哎 +哎呀 +哎哟 +哗 +哗啦 +哟 +哦 +哩 +哪 +哪个 +哪些 +哪儿 +哪天 +哪年 +哪怕 +哪样 +哪边 +哪里 +哼 +哼唷 +唉 +唯有 +啊 +啊呀 +啊哈 +啊哟 +啐 +啥 +啦 +啪达 +啷当 +喀 +喂 +喏 +喔唷 +喽 +嗡 +嗡嗡 +嗬 +嗯 +嗳 +嘎 +嘎嘎 +嘎登 +嘘 +嘛 +嘻 +嘿 +嘿嘿 +四 +因 +因为 +因了 +因此 +因着 +因而 +固 +固然 +在 +在下 +在于 +地 +均 +坚决 +坚持 +基于 +基本 +基本上 +处在 +处处 +处理 +复杂 +多 +多么 +多亏 +多多 +多多少少 +多多益善 +多少 +多年前 +多年来 +多数 +多次 +够瞧的 +大 +大不了 +大举 +大事 +大体 +大体上 +大凡 +大力 +大多 +大多数 +大大 +大家 +大张旗鼓 +大批 +大抵 +大概 +大略 +大约 +大致 +大都 +大量 +大面儿上 +失去 +奇 +奈 +奋勇 +她 +她们 +她是 +她的 +好 +好在 +好的 +好象 +如 +如上 +如上所述 +如下 +如今 +如何 +如其 +如前所述 +如同 +如常 +如是 +如期 +如果 +如次 +如此 +如此等等 +如若 +始而 +姑且 +存在 +存心 +孰料 +孰知 +宁 +宁可 +宁愿 +宁肯 +它 +它们 +它们的 +它是 +它的 +安全 +完全 +完成 +定 +实现 +实际 +宣布 +容易 +密切 +对 +对于 +对应 +对待 +对方 +对比 +将 +将才 +将要 +将近 +小 +少数 +尔 +尔后 +尔尔 +尔等 +尚且 +尤其 +就 +就地 +就是 +就是了 +就是说 +就此 +就算 +就要 +尽 +尽可能 +尽如人意 +尽心尽力 +尽心竭力 +尽快 +尽早 +尽然 +尽管 +尽管如此 +尽量 +局外 +居然 +届时 +属于 +屡 +屡屡 +屡次 +屡次三番 +岂 +岂但 +岂止 +岂非 +川流不息 +左右 +巨大 +巩固 +差一点 +差不多 +己 +已 +已矣 +已经 +巴 +巴巴 +带 +帮助 +常 +常常 +常言说 +常言说得好 +常言道 +平素 +年复一年 +并 +并不 +并不是 +并且 +并排 +并无 +并没 +并没有 +并肩 +并非 +广大 +广泛 +应当 +应用 +应该 +庶乎 +庶几 +开外 +开始 +开展 +引起 +弗 +弹指之间 +强烈 +强调 +归 +归根到底 +归根结底 +归齐 +当 +当下 +当中 +当儿 +当前 +当即 +当口儿 +当地 +当场 +当头 +当庭 +当时 +当然 +当真 +当着 +形成 +彻夜 +彻底 +彼 +彼时 +彼此 +往 +往往 +待 +待到 +很 +很多 +很少 +後来 +後面 +得 +得了 
+得出 +得到 +得天独厚 +得起 +心里 +必 +必定 +必将 +必然 +必要 +必须 +快 +快要 +忽地 +忽然 +怎 +怎么 +怎么办 +怎么样 +怎奈 +怎样 +怎麽 +怕 +急匆匆 +怪 +怪不得 +总之 +总是 +总的来看 +总的来说 +总的说来 +总结 +总而言之 +恍然 +恐怕 +恰似 +恰好 +恰如 +恰巧 +恰恰 +恰恰相反 +恰逢 +您 +您们 +您是 +惟其 +惯常 +意思 +愤然 +愿意 +慢说 +成为 +成年 +成年累月 +成心 +我 +我们 +我是 +我的 +或 +或则 +或多或少 +或是 +或曰 +或者 +或许 +战斗 +截然 +截至 +所 +所以 +所在 +所幸 +所有 +所谓 +才 +才能 +扑通 +打 +打从 +打开天窗说亮话 +扩大 +把 +抑或 +抽冷子 +拦腰 +拿 +按 +按时 +按期 +按照 +按理 +按说 +挨个 +挨家挨户 +挨次 +挨着 +挨门挨户 +挨门逐户 +换句话说 +换言之 +据 +据实 +据悉 +据我所知 +据此 +据称 +据说 +掌握 +接下来 +接着 +接著 +接连不断 +放量 +故 +故意 +故此 +故而 +敞开儿 +敢 +敢于 +敢情 +数/ +整个 +断然 +方 +方便 +方才 +方能 +方面 +旁人 +无 +无宁 +无法 +无论 +既 +既...又 +既往 +既是 +既然 +日复一日 +日渐 +日益 +日臻 +日见 +时候 +昂然 +明显 +明确 +是 +是不是 +是以 +是否 +是的 +显然 +显著 +普通 +普遍 +暗中 +暗地里 +暗自 +更 +更为 +更加 +更进一步 +曾 +曾经 +替 +替代 +最 +最后 +最大 +最好 +最後 +最近 +最高 +有 +有些 +有关 +有利 +有力 +有及 +有所 +有效 +有时 +有点 +有的 +有的是 +有着 +有著 +望 +朝 +朝着 +末##末 +本 +本人 +本地 +本着 +本身 +权时 +来 +来不及 +来得及 +来看 +来着 +来自 +来讲 +来说 +极 +极为 +极了 +极其 +极力 +极大 +极度 +极端 +构成 +果然 +果真 +某 +某个 +某些 +某某 +根据 +根本 +格外 +梆 +概 +次第 +欢迎 +欤 +正值 +正在 +正如 +正巧 +正常 +正是 +此 +此中 +此后 +此地 +此处 +此外 +此时 +此次 +此间 +殆 +毋宁 +每 +每个 +每天 +每年 +每当 +每时每刻 +每每 +每逢 +比 +比及 +比如 +比如说 +比方 +比照 +比起 +比较 +毕竟 +毫不 +毫无 +毫无例外 +毫无保留地 +汝 +沙沙 +没 +没奈何 +没有 +沿 +沿着 +注意 +活 +深入 +清楚 +满 +满足 +漫说 +焉 +然 +然则 +然后 +然後 +然而 +照 +照着 +牢牢 +特别是 +特殊 +特点 +犹且 +犹自 +独 +独自 +猛然 +猛然间 +率尔 +率然 +现代 +现在 +理应 +理当 +理该 +瑟瑟 +甚且 +甚么 +甚或 +甚而 +甚至 +甚至于 +用 +用来 +甫 +甭 +由 +由于 +由是 +由此 +由此可见 +略 +略为 +略加 +略微 +白 +白白 +的 +的确 +的话 +皆可 +目前 +直到 +直接 +相似 +相信 +相反 +相同 +相对 +相对而言 +相应 +相当 +相等 +省得 +看 +看上去 +看出 +看到 +看来 +看样子 +看看 +看见 +看起来 +真是 +真正 +眨眼 +着 +着呢 +矣 +矣乎 +矣哉 +知道 +砰 +确定 +碰巧 +社会主义 +离 +种 +积极 +移动 +究竟 +穷年累月 +突出 +突然 +窃 +立 +立刻 +立即 +立地 +立时 +立马 +竟 +竟然 +竟而 +第 +第二 +等 +等到 +等等 +策略地 +简直 +简而言之 +简言之 +管 +类如 +粗 +精光 +紧接着 +累年 +累次 +纯 +纯粹 +纵 +纵令 +纵使 +纵然 +练习 +组成 +经 +经常 +经过 +结合 +结果 +给 +绝 +绝不 +绝对 +绝非 +绝顶 +继之 +继后 +继续 +继而 +维持 +综上所述 +缕缕 +罢了 +老 +老大 +老是 +老老实实 +考虑 +者 +而 +而且 +而况 +而又 +而后 +而外 +而已 +而是 +而言 +而论 +联系 +联袂 +背地里 +背靠背 +能 +能否 +能够 +腾 +自 +自个儿 +自从 +自各儿 +自后 +自家 +自己 +自打 +自身 +臭 +至 +至于 +至今 +至若 +致 +般的 +良好 +若 +若夫 +若是 +若果 +若非 +范围 +莫 +莫不 +莫不然 +莫如 +莫若 +莫非 +获得 +藉以 +虽 +虽则 +虽然 +虽说 +蛮 +行为 +行动 +表明 +表示 +被 +要 +要不 +要不是 +要不然 +要么 +要是 +要求 +见 +规定 +觉得 +譬喻 +譬如 +认为 +认真 +认识 +让 +许多 +论 +论说 +设使 +设或 +设若 +诚如 +诚然 +话说 +该 +该当 +说明 +说来 +说说 +请勿 +诸 +诸位 +诸如 +谁 +谁人 +谁料 +谁知 +谨 +豁然 +贼死 +赖以 +赶 +赶快 +赶早不赶晚 +起 +起先 +起初 +起头 +起来 +起见 +起首 +趁 +趁便 +趁势 +趁早 +趁机 +趁热 +趁着 +越是 +距 +跟 +路经 +转动 +转变 +转贴 +轰然 +较 +较为 +较之 +较比 +边 +达到 +达旦 +迄 +迅速 +过 +过于 +过去 +过来 +运用 +近 +近几年来 +近年来 +近来 +还 +还是 +还有 +还要 +这 +这一来 +这个 +这么 +这么些 +这么样 +这么点儿 +这些 +这会儿 +这儿 +这就是说 +这时 +这样 +这次 +这点 +这种 +这般 +这边 +这里 +这麽 +进入 +进去 +进来 +进步 +进而 +进行 +连 +连同 +连声 +连日 +连日来 +连袂 +连连 +迟早 +迫于 +适应 +适当 +适用 +逐步 +逐渐 +通常 +通过 +造成 +逢 +遇到 +遭到 +遵循 +遵照 +避免 +那 +那个 +那么 +那么些 +那么样 +那些 +那会儿 +那儿 +那时 +那末 +那样 +那般 +那边 +那里 +那麽 +部分 +都 +鄙人 +采取 +里面 +重大 +重新 +重要 +鉴于 +针对 +长期以来 +长此下去 +长线 +长话短说 +问题 +间或 +防止 +阿 +附近 +陈年 +限制 +陡然 +除 +除了 +除却 +除去 +除外 +除开 +除此 +除此之外 +除此以外 +除此而外 +除非 +随 +随后 +随时 +随着 +随著 +隔夜 +隔日 +难得 +难怪 +难说 +难道 +难道说 +集中 +零 +需要 +非但 +非常 +非徒 +非得 +非特 +非独 +靠 +顶多 +顷 +顷刻 +顷刻之间 +顷刻间 +顺 +顺着 +顿时 +颇 +风雨无阻 +饱 +首先 +马上 +高低 +高兴 +默然 +默默地 +齐 +︿ +! +# +$ +% +& +' +( +) +)÷(1- +)、 +* ++ ++ξ +++ +, +,也 +- +-β +-- +-[*]- +. +/ +0 +0:2 +1 +1. +12% +2 +2.3% +3 +4 +5 +5:0 +6 +7 +8 +9 +: +; +< +<± +<Δ +<λ +<φ +<< += +=″ +=☆ +=( +=- +=[ +={ +> +>λ +? +@ +A +LI +R.L. 
ZXFITL
+[
+[①①]
+[①②]
+[①③]
+[①④]
+[①⑤]
+[①⑥]
+[①⑦]
+[①⑧]
+[①⑨]
+[①A]
+[①B]
+[①C]
+[①D]
+[①E]
+[①]
+[①a]
+[①c]
+[①d]
+[①e]
+[①f]
+[①g]
+[①h]
+[①i]
+[①o]
+[②
+[②①]
+[②②]
+[②③]
+[②④
+[②⑤]
+[②⑥]
+[②⑦]
+[②⑧]
+[②⑩]
+[②B]
+[②G]
+[②]
+[②a]
+[②b]
+[②c]
+[②d]
+[②e]
+[②f]
+[②g]
+[②h]
+[②i]
+[②j]
+[③①]
+[③⑩]
+[③F]
+[③]
+[③a]
+[③b]
+[③c]
+[③d]
+[③e]
+[③g]
+[③h]
+[④]
+[④a]
+[④b]
+[④c]
+[④d]
+[④e]
+[⑤]
+[⑤]]
+[⑤a]
+[⑤b]
+[⑤d]
+[⑤e]
+[⑤f]
+[⑥]
+[⑦]
+[⑧]
+[⑨]
+[⑩]
+[*]
+[-
+[]
+]
+]∧′=[
+][
+_
+a]
+b]
+c]
+e]
+f]
+ng昉
+{
+{-
+|
+}
+}>
+~
+~±
+~+
+¥
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_full_1p.sh b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_full_1p.sh
new file mode 100644
index 000000000..ffa2ee90c
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_full_1p.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+# Current path; no modification needed
+cur_path=`pwd`/../
+# Collective communication parameters; no modification needed
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+# Dataset path; keep empty, no modification needed
+data_path=""
+# Default log level; no modification needed
+#export ASCEND_GLOBAL_LOG_LEVEL=3
+# Basic parameters; review and adjust per model
+# Network name, same as the directory name
+Network="word2vec_ID2886_for_TensorFlow2.X"
+# Training epochs
+train_epochs=1
+# Training batch size
+batch_size=128
+# Training steps
+train_steps=3000000
+# Learning rate
+learning_rate=1
+############ Debug/monitoring parameters ##############
+precision_mode="allow_mix_precision"
+# The following parameters need no modification
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump    # cur_path here is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=True
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Debug/monitoring parameters ##############
+# Help message; no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:./train_full_1p.sh "
+    echo " "
+    echo "parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              whether to enable overflow detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              whether to enable profiling for performance debugging, default is False
+    --auto_tune              whether to enable auto tune, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+# Argument validation; no modification needed
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --train_steps* ]];then
+        train_steps=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in; no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+# Enter the training script directory; review and adjust per model
+cd $cur_path
+# Set environment variables; no modification needed
+echo "Device ID: $ASCEND_DEVICE_ID"
+
+# Create the DeviceID output directory; no modification needed
+if [ -d ${cur_path}/test/output/${ASCEND_DEVICE_ID} ];then
+    rm -rf ${cur_path}/test/output/${ASCEND_DEVICE_ID}
+    mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+else
+    mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+fi
+# Run the training script; the arguments below need no modification, the rest should be reviewed per model
+
+# Training start time; no modification needed
+start_time=$(date +%s)
+#python3 word2vec_chinese.py -dataset_dir=$data_path $step --precision_mode=$precision_mode > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
+nohup python3 word2vec_chinese.py \
+    --dataset_dir=$data_path \
+    --step=$train_steps \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --auto_tune=${auto_tune} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
+# Training end time; no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+train_time=`grep -a 'time cost' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${ASCEND_DEVICE_ID}.log |awk 'END {print $9}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'2000'*'${batch_size}'/'${train_time}'}'`
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+# Output training accuracy; review and adjust per model
+train_accuracy=`grep -a 'loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${ASCEND_DEVICE_ID}.log |awk 'END{print $5}' | awk -F ',' '{print $1}'`
+
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+# Summary of stability/accuracy monitoring results
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'acc'
+## Collect performance data
+# Throughput; no modification needed
+ActualFPS=${FPS}
+# Per-iteration training time; no modification needed
+TrainingTime=$train_time
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F"loss = " '{print $2}' | awk -F ',' '{print $1}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_performance_1p.sh b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_performance_1p.sh
new file mode 100644
index 000000000..b8955010a
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/test/train_performance_1p.sh
@@ -0,0 +1,173 @@
+#!/bin/bash
+# Current path; no modification needed
+cur_path=`pwd`/../
+# Collective communication parameters; no modification needed
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+# Dataset path; keep empty, no modification needed
+data_path=""
+# Default log level; no modification needed
+#export ASCEND_GLOBAL_LOG_LEVEL=3
+# Basic parameters; review and adjust per model
+# Network name, same as the directory name
+Network="word2vec_ID2886_for_TensorFlow2.X"
+# Training epochs
+train_epochs=1
+# Training batch size
+batch_size=128
+# Training steps
+train_steps=30000
+# Learning rate
+learning_rate=1
+############ Debug/monitoring parameters ##############
+precision_mode="allow_mix_precision"
+# The following parameters need no modification
+over_dump=False
+if [[ $over_dump == True ]];then
+    over_dump_path=$cur_path/test/overflow_dump    # cur_path here is the code root directory
+    mkdir -p ${over_dump_path}
+fi
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+use_mixlist=True
+mixlist_file="./configs/ops_info.json"
+fusion_off_flag=False
+fusion_off_file="./configs/fusion_switch.cfg"
+auto_tune=False
+############ Debug/monitoring parameters ##############
+# Help message; no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:./train_performance_1p.sh "
+    echo " "
+    echo "parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              whether to enable overflow detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              whether to enable profiling for performance debugging, default is False
+    --auto_tune              whether to enable auto tune, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+# Argument validation; no modification needed
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --train_steps* ]];then
+        train_steps=`echo ${para#*=}`
+    elif [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --use_mixlist* ]];then
+        use_mixlist=`echo ${para#*=}`
+    elif [[ $para == --mixlist_file* ]];then
+        mixlist_file=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_flag* ]];then
+        fusion_off_flag=`echo ${para#*=}`
+    elif [[ $para == --fusion_off_file* ]];then
+        fusion_off_file=`echo ${para#*=}`
+    elif [[ $para == --auto_tune* ]];then
+        auto_tune=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in; no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+    exit 1
+fi
+
+# Enter the training script directory; review and adjust per model
+cd $cur_path
+# Set environment variables; no modification needed
+echo "Device ID: $ASCEND_DEVICE_ID"
+
+# Create the DeviceID output directory; no modification needed
+if [ -d ${cur_path}/test/output/${ASCEND_DEVICE_ID} ];then
+    rm -rf ${cur_path}/test/output/${ASCEND_DEVICE_ID}
+    mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+else
+    mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+fi
+# Run the training script; the arguments below need no modification, the rest should be reviewed per model
+
+# Training start time; no modification needed
+start_time=$(date +%s)
+#python3 word2vec_chinese.py -dataset_dir=$data_path $step --precision_mode=$precision_mode > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
+nohup python3 word2vec_chinese.py \
+    --dataset_dir=$data_path \
+    --step=$train_steps \
+    --precision_mode=${precision_mode} \
+    --over_dump=${over_dump} \
+    --over_dump_path=${over_dump_path} \
+    --data_dump_flag=${data_dump_flag} \
+    --data_dump_step=${data_dump_step} \
+    --data_dump_path=${data_dump_path} \
+    --profiling=${profiling} \
+    --use_mixlist=${use_mixlist} \
+    --fusion_off_flag=${fusion_off_flag} \
+    --mixlist_file=${mixlist_file} \
+    --fusion_off_file=${fusion_off_file} \
+    --profiling_dump_path=${profiling_dump_path} \
+    --auto_tune=${auto_tune} > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
+# Training end time; no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance (FPS); review and adjust per model
+train_time=`grep -a 'time cost' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${ASCEND_DEVICE_ID}.log |awk 'END {print $9}'`
+FPS=`awk 'BEGIN{printf "%.2f\n",'2000'*'${batch_size}'/'${train_time}'}'`
+# Print; no modification needed
+echo "Final Performance images/sec : $FPS"
+# Output training accuracy; review and adjust per model
+train_accuracy=`grep -a 'loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${ASCEND_DEVICE_ID}.log |awk 'END{print $5}' | awk -F ',' '{print $1}'`
+
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+# Summary of stability/accuracy monitoring results
+# Training case info; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+## Collect performance data
+# Throughput; no modification needed
+ActualFPS=${FPS}
+# Per-iteration training time; no modification needed
+TrainingTime=$train_time
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep loss $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F"loss = " '{print $2}' | awk -F ',' '{print $1}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/word2vec_chinese.py b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/word2vec_chinese.py
new file mode 100644
index 000000000..f82016aa1
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/word2vec_ID2886_for_TensorFlow2.X/word2vec_chinese.py
@@ -0,0 +1,362 @@
+#!/usr/bin/env python
+# -*- coding:utf-8 -*-
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import math
+import random
+import jieba
+import numpy as np
+from six.moves import xrange
+import tensorflow as tf
+import time
+import sys
+import os
+import argparse
+import ast
+
+import npu_device
+from npu_device.compat.v1.npu_init import *
+npu_device.compat.enable_v1()
+
+parser = argparse.ArgumentParser(description='config')
+parser.add_argument('--dataset_dir', type=str, default='./cnews', help='dataset dir')
+parser.add_argument('--step', type=int, default=30000, help='train steps')
+parser.add_argument('--precision_mode', type=str, default='allow_mix_precision', help='precision mode')
+
+#===============================NPU Migration=========================================
+parser.add_argument('--over_dump', dest='over_dump', type=ast.literal_eval, default=False,
+                    help='whether to enable overflow detection, default is False')
+parser.add_argument('--data_dump_flag', dest='data_dump_flag', type=ast.literal_eval, default=False,
+                    help='data dump flag, default is False')
+parser.add_argument('--data_dump_step', default="10",
+                    help='data dump step, default is 10')
+parser.add_argument('--profiling', dest='profiling', type=ast.literal_eval, default=False,
+                    help='whether to enable profiling for performance debugging, default is False')
+parser.add_argument('--profiling_dump_path', default="/home/data", type=str, help='the path to save profiling data')
+parser.add_argument('--over_dump_path', default="/home/data", type=str, help='the path to save over dump data')
+parser.add_argument('--data_dump_path', default="/home/data", type=str, help='the path to save dump data')
+parser.add_argument('--use_mixlist', dest='use_mixlist', type=ast.literal_eval, default=False,
+                    help='use_mixlist flag, default is False')
+parser.add_argument('--fusion_off_flag', dest='fusion_off_flag', type=ast.literal_eval, default=False,
+                    help='fusion_off flag, default is False')
+parser.add_argument('--mixlist_file', default="ops_info.json", type=str, help='mixlist file name, default is ops_info.json')
+parser.add_argument('--fusion_off_file', default="fusion_switch.cfg", type=str, help='fusion_off file name, default is fusion_switch.cfg')
+parser.add_argument('--auto_tune', dest='auto_tune', type=ast.literal_eval, default=False, help='auto tune, default is False')
+#===============================NPU Migration=========================================
+
+FLAGS = parser.parse_args()
+
+def npu_tf_optimizer(opt):
+    npu_opt = NPUDistributedOptimizer(opt)
+    # if FLAGS.precision_mode == "allow_mix_precision":
+    #     loss_scale_manager = ExponentialUpdateLossScaleManager(
+    #         init_loss_scale=2**32,
+    #         incr_every_n_steps=1000,
+    #         decr_every_n_nan_or_inf=2,
+    #         decr_ratio=0.5)
+    #     npu_opt = NPULossScaleOptimizer(npu_opt, loss_scale_manager)
+    return npu_opt
+
+def npu_session_config_init(session_config=None):
+    session_config = tf.compat.v1.ConfigProto()
+    custom_op = session_config.graph_options.rewrite_options.custom_optimizers.add()
+    custom_op.name = 'NpuOptimizer'
+    custom_op.parameter_map["enable_data_pre_proc"].b = True
+    custom_op.parameter_map["iterations_per_loop"].i = 10
+    if FLAGS.data_dump_flag:
+        custom_op.parameter_map["enable_dump"].b = True
+        custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.data_dump_path)
+        custom_op.parameter_map["dump_step"].s = tf.compat.as_bytes(FLAGS.data_dump_step)
+        custom_op.parameter_map["dump_mode"].s = tf.compat.as_bytes("all")
+    if FLAGS.over_dump:
+        custom_op.parameter_map["enable_dump_debug"].b = True
+        custom_op.parameter_map["dump_path"].s = tf.compat.as_bytes(FLAGS.over_dump_path)
+        custom_op.parameter_map["dump_debug_mode"].s = tf.compat.as_bytes("all")
+    if FLAGS.profiling:
+        custom_op.parameter_map["profiling_mode"].b = True
+        profiling_options = '{"output":"' + FLAGS.profiling_dump_path + '", \
+                            "training_trace":"on", \
+                            "task_trace":"on", \
+                            "aicpu":"on", \
+                            "aic_metrics":"PipeUtilization",\
+                            "fp_point":"", \
+                            "bp_point":""}'
+        custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options)
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(FLAGS.precision_mode)
+    if FLAGS.use_mixlist and FLAGS.precision_mode == 'allow_mix_precision':
+        custom_op.parameter_map["modify_mixlist"].s = tf.compat.as_bytes(FLAGS.mixlist_file)
+    if FLAGS.fusion_off_flag:
+        custom_op.parameter_map["fusion_switch_file"].s = tf.compat.as_bytes(FLAGS.fusion_off_file)
+    if FLAGS.auto_tune:
+        custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")
+    session_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+    session_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+    return session_config
+
+# Step 1: Download the data.
+# Read the data into a list of strings.
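+# Illustrative usage of npu_session_config_init() above (a minimal sketch; the
+# real call sits at the bottom of this script, and "graph" is assumed to be an
+# already-built tf.Graph):
+#
+#     with tf.compat.v1.Session(graph=graph, config=npu_session_config_init()) as sess:
+#         sess.run(...)
+#
+# The returned ConfigProto registers the NpuOptimizer custom pass and turns off
+# the remapping and memory-optimization rewrites that would otherwise interfere
+# with it.
+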
+def read_data():
+    """
+    Pre-process the training text and collect every token of the corpus
+    into a single list.
+    """
+    global FLAGS
+    # Read the stop-word list.
+    stop_words = []
+    with open('stop_words.txt', "r", encoding="UTF-8") as f:
+        line = f.readline()
+        while line:
+            stop_words.append(line.rstrip('\n'))
+            line = f.readline()
+    stop_words = set(stop_words)
+    print('Stop words loaded, {n} words in total'.format(n=len(stop_words)))
+
+    # Read the text, pre-process it, segment it with jieba, and collect the tokens.
+    raw_word_list = []
+    # with open('doupocangqiong.txt',"r", encoding='UTF-8') as f:
+    val_txt = os.path.join(FLAGS.dataset_dir, './cnews.val.txt')
+    with open(val_txt, 'r', encoding='UTF-8') as f:
+        line = f.readline()
+        while line:
+            while ('\n' in line):
+                line = line.replace('\n', '')
+            while (' ' in line):
+                line = line.replace(' ', '')
+            if (len(line) > 0):  # only segment non-empty sentences
+                raw_words = list(jieba.cut(line, cut_all=False))
+                raw_word_list.extend(raw_words)
+            line = f.readline()
+    return raw_word_list
+
+# Step 1: read the file contents into a single list of tokens.
+words = read_data()
+print('Data size', len(words))
+
+# Step 2: Build the dictionary and replace rare words with UNK token.
+vocabulary_size = 50000
+
+def build_dataset(words):
+    count = [['UNK', -1]]
+    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
+    print("count", len(count))
+    dictionary = dict()
+    for (word, _) in count:
+        dictionary[word] = len(dictionary)
+    data = list()
+    unk_count = 0
+    for word in words:
+        if (word in dictionary):
+            index = dictionary[word]
+        else:
+            index = 0
+            unk_count += 1
+        data.append(index)
+    count[0][1] = unk_count
+    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
+    return data, count, dictionary, reverse_dictionary
+
+data, count, dictionary, reverse_dictionary = build_dataset(words)
+# Delete words to save memory.
+del words
+print('Most common words (+UNK)', count[:5])
+print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
+
+data_index = 0
+
+
+# Step 3: Function to generate a training batch for the skip-gram model.
+def generate_batch(batch_size, num_skips, skip_window):
+    global data_index
+    assert batch_size % num_skips == 0
+    assert num_skips <= 2 * skip_window
+    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
+    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
+    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
+    buffer = collections.deque(maxlen=span)
+    for _ in range(span):
+        buffer.append(data[data_index])
+        data_index = (data_index + 1) % len(data)
+    for i in range(batch_size // num_skips):
+        target = skip_window  # target label at the center of the buffer
+        targets_to_avoid = [skip_window]
+        for j in range(num_skips):
+            while target in targets_to_avoid:
+                target = random.randint(0, span - 1)
+            targets_to_avoid.append(target)
+            batch[i * num_skips + j] = buffer[skip_window]
+            labels[i * num_skips + j, 0] = buffer[target]
+        buffer.append(data[data_index])
+        data_index = (data_index + 1) % len(data)
+    return batch, labels
+
+batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
+for i in range(8):
+    print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
+
+# Step 4: Build and train a skip-gram model.
+batch_size = 128
+embedding_size = 128
+skip_window = 1
+num_skips = 2
+valid_size = 9  # Note: this must equal len(valid_word), otherwise an error is raised.
+valid_window = 100
+num_sampled = 64  # Number of negative examples to sample.
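+
+# Editorial sketch (not part of the original script): a worked trace of the
+# generate_batch() demo above. Assuming a toy corpus data = [0, 1, 2, 3, 4]
+# with skip_window=1 and num_skips=2, the sliding window pairs each center
+# word with both of its neighbours, so batch_size=8 from data_index=0 yields
+#   batch  = [1, 1, 2, 2, 3, 3, 4, 4]
+#   labels = [[0], [2], [1], [3], [2], [4], [3], [0]]
+# up to the random ordering of the two neighbours within each center.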
+
+# Validation set.
+# valid_word = ['萧炎','灵魂','火焰','萧薰儿','药老','天阶',"云岚宗","乌坦城","惊诧"]
+valid_word = ['城市', '记者', '体育', '教练', '足球', '赛季', '奥运会', '丑闻', '足协']
+valid_examples = [dictionary[li] for li in valid_word]
+
+graph = tf.Graph()
+with graph.as_default():
+    # Input data.
+    train_inputs = tf.compat.v1.placeholder(tf.int32, shape=[batch_size])
+    train_labels = tf.compat.v1.placeholder(tf.int32, shape=[batch_size, 1])
+    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
+
+    # Ops and variables pinned to the CPU because of missing GPU implementation
+    with tf.device('/cpu:0'):
+        # Look up embeddings for inputs.
+        embeddings = tf.Variable(
+            tf.random.uniform([vocabulary_size, embedding_size], -1.0, 1.0))
+        embed = tf.nn.embedding_lookup(params=embeddings, ids=train_inputs)
+
+        # Construct the variables for the NCE loss
+        nce_weights = tf.Variable(
+            tf.random.truncated_normal([vocabulary_size, embedding_size],
+                                       stddev=1.0 / math.sqrt(embedding_size)))
+        nce_biases = tf.Variable(tf.zeros([vocabulary_size]), dtype=tf.float32)
+
+    # Compute the average NCE loss for the batch.
+    # tf.nn.nce_loss automatically draws a new sample of the negative labels
+    # each time we evaluate the loss.
+    loss = tf.reduce_mean(input_tensor=tf.nn.nce_loss(weights=nce_weights,
+                                                      biases=nce_biases,
+                                                      inputs=embed,
+                                                      labels=train_labels,
+                                                      num_sampled=num_sampled,
+                                                      num_classes=vocabulary_size))
+
+    # Construct the SGD optimizer using a learning rate of 1.0.
+    optimizer = npu_tf_optimizer(tf.compat.v1.train.GradientDescentOptimizer(1.0)).minimize(loss)
+
+    # Compute the cosine similarity between minibatch examples and all embeddings.
+    norm = tf.sqrt(tf.reduce_sum(input_tensor=tf.square(embeddings), axis=1, keepdims=True))
+    normalized_embeddings = embeddings / norm
+    valid_embeddings = tf.nn.embedding_lookup(params=normalized_embeddings, ids=valid_dataset)
+    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
+
+    # Add variable initializer.
+    init = tf.compat.v1.global_variables_initializer()
+
+# Step 5: Begin training.
+num_steps = FLAGS.step
+
+with tf.compat.v1.Session(graph=graph, config=npu_session_config_init()) as session:
+    # We must initialize all variables before we use them.
+    init.run()
+    print("Initialized")
+
+    average_loss = 0
+    duration = 0
+
+    for step in xrange(num_steps):
+        start_time = time.time()
+        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
+        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
+
+        # We perform one update step by evaluating the optimizer op (including
+        # it in the list of returned values for session.run()).
+        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
+        average_loss += loss_val
+        duration += (time.time() - start_time)
+
+        if step % 200 == 0:
+            if step > 0:
+                average_loss /= 200
+            # The average loss is an estimate of the loss over the last 200 batches.
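+            # duration accumulates wall-clock seconds since the last report, so
+            # the 'time cost' printed below is the total time for the 200-step
+            # window; both accumulators are reset after printing.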
+            # print("Average loss at step ", step, ": ", average_loss)
+            print('step= ', step, 'loss = {:.3f}, time cost = {:.4f} s'.format(average_loss, duration))
+            average_loss = 0
+            duration = 0
+
+        # Note that this is expensive (~20% slowdown if computed every 500 steps)
+        if step % 10000 == 0:
+            sim = similarity.eval()
+            for i in xrange(valid_size):
+                valid_word = reverse_dictionary[valid_examples[i]]
+                top_k = 8  # number of nearest neighbors
+                nearest = (-sim[i, :]).argsort()[:top_k]
+                log_str = "Nearest to %s:" % valid_word
+                for k in xrange(top_k):
+                    close_word = reverse_dictionary[nearest[k]]
+                    log_str = "%s %s," % (log_str, close_word)
+                print(log_str)
+    final_embeddings = normalized_embeddings.eval()
+
+# Step 6: Visualize the embeddings.
+def plot_with_labels(low_dim_embs, labels, filename='images/tsne3.png', fonts=None):
+    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
+    plt.figure(figsize=(18, 18))  # in inches
+    for i, label in enumerate(labels):
+        x, y = low_dim_embs[i, :]
+        plt.scatter(x, y)
+        plt.annotate(label,
+                     fontproperties=fonts,
+                     xy=(x, y),
+                     xytext=(5, 2),
+                     textcoords='offset points',
+                     ha='right',
+                     va='bottom')
+
+    plt.savefig(filename, dpi=800)
+
+try:
+    from sklearn.manifold import TSNE
+    import matplotlib.pyplot as plt
+    from matplotlib.font_manager import FontProperties
+
+    # Use a font that can render the Chinese labels in the figure.
+    # font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
+    f_name = os.path.join(FLAGS.dataset_dir, "./SIMSUN.TTC")
+    font = FontProperties(fname=f_name, size=14)
+
+    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
+    plot_only = 500
+    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
+    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
+    plot_with_labels(low_dim_embs, labels, fonts=font)
+
+
+except ImportError:
+    print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
\ No newline at end of file
-- Gitee

From 73cf1b02f07b60e5a4751a1b00474035a3a53830 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 07:56:58 +0000
Subject: [PATCH 52/54] add TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/modelzoo_level.txt.

---
 .../modelzoo_level.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/modelzoo_level.txt

diff --git a/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..0b49b4fb2
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/knowledge_disti_ID2517_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
-- Gitee

From 8e48e162363b1da7585c39c2cc2e3cfc4f8ac52e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 08:11:14 +0000
Subject: [PATCH 53/54] add text_classification_with_transformer_ID2563_for_TensorFlow2.X/modelzoo_level.txt.
---
 .../modelzoo_level.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/modelzoo_level.txt

diff --git a/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/modelzoo_level.txt b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/modelzoo_level.txt
new file mode 100644
index 000000000..0b49b4fb2
--- /dev/null
+++ b/TensorFlow2/built-in/keras_sample/text_classification_with_transformer_ID2563_for_TensorFlow2.X/modelzoo_level.txt
@@ -0,0 +1,3 @@
+FuncStatus:OK
+PerfStatus:OK
+PrecisionStatus:OK
\ No newline at end of file
-- Gitee

From 2c16e7e59dc4e3643bd830a62805c99b28eb164d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=96=87=E8=BD=A9?= <10359565+zhang-wenxuan09@user.noreply.gitee.com>
Date: Mon, 13 Jun 2022 12:01:20 +0000
Subject: [PATCH 54/54] update keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py.

---
 .../keypoint_detection.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py
index 6f41f747e..7a39a3b19 100644
--- a/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py
+++ b/TensorFlow2/built-in/keras_sample/keypoint_detection_ID2516_for_TensorFlow2.X/keypoint_detection.py
@@ -422,7 +422,8 @@ print('-------------------------------------------------------------------TTTTTT
 #print(train_dataset.shape)
 print(type(train_dataset))
 if args.static==1:
-    train_dataset=(np.array(train_dataset[0][:4096]),[np.array(train_dataset[1][:4096]), np.array(train_dataset[2][:4096]), np.array(train_dataset[3][:4096])])
+    print('static')
+    #train_dataset=(np.array(train_dataset[0][:4096]),[np.array(train_dataset[1][:4096]), np.array(train_dataset[2][:4096]), np.array(train_dataset[3][:4096])])
 validation_dataset = KeyPointsDataset(validation_keys, test_aug, train=False)
 print(f"Total batches in training set: {len(train_dataset)}")
-- Gitee