diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/LICENSE b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0f45a2c79aa9e815ecbb58bc3e5e8cc1ef8f1dc4
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/LICENSE
@@ -0,0 +1,27 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/README.md b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..35baea161c7bb518f36bc444929cf95991a15461
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/README.md
@@ -0,0 +1,167 @@
+- [Basic Information](#basic-information)
+- [Overview](#overview)
+- [Training Environment Setup](#training-environment-setup)
+- [Quick Start](#quick-start)
+- [Transfer Learning Guide](#transfer-learning-guide)
+- [Advanced Reference](#advanced-reference)
+
+# Basic Information
+
+**Publisher: Huawei**
+
+**Application Domain: Image Synthesis (Makeup Transfer)**
+
+**Version: 1.1**
+
+**Modified: 2022.09.02**
+
+**Framework: TensorFlow_1.15.0**
+
+**Model Format: ckpt**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Official**
+
+**Description: Training code for the PairedCycleGAN model, based on the TensorFlow framework**
+
+# Overview
+
+## Summary
+
+Our unsupervised learning method relies on a new cycle-consistent generative adversarial network framework. Unlike image-domain transfer, our style-transfer problem involves two asymmetric functions: a forward function encodes example-based style transfer, while a backward function removes the style. We build two coupled networks to implement these functions, one that applies a makeup style and one that removes it, such that applying them in sequence to an input photo reproduces the input. The learned style network can quickly apply an arbitrary makeup style to an arbitrary photo, and we demonstrate this on a wide range of portraits and styles. A minimal sketch of the combined training loss follows below.
+
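+The cycle-consistency constraint reduces to a simple reconstruction penalty. Below is a minimal sketch of how the loss terms are combined, mirroring `loss_cal` in main.py (the function name and arguments here are illustrative; the weights are the ones used in this repository):
+
+```
+import tensorflow as tf
+
+def generator_loss(input_A, input_B, cyc_A, cyc_B,
+                   disc_loss_A, disc_loss_B, perceptual_loss, makeup_loss):
+    # Cycle consistency: transferring makeup and then removing it (and vice
+    # versa) should reproduce the original inputs.
+    cyc_loss = (tf.reduce_mean(tf.abs(input_A - cyc_A))
+                + tf.reduce_mean(tf.abs(input_B - cyc_B)))
+    # Weights 20 / 1 / 0.05 / 0.5 are the values used in main.py's loss_cal.
+    return (cyc_loss * 20
+            + disc_loss_A + disc_loss_B
+            + perceptual_loss * 0.05
+            + makeup_loss * 0.5)
+```
+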
+- Reference paper:
+
+ https://ieeexplore.ieee.org/document/8578110
+
+- Reference implementation:
+
+ https://github.com/baldFemale/beautyGAN-tf-Implement
+
+- Implementation adapted for the Ascend AI processor:
+ https://gitee.com/ascend/ModelZoo-TensorFlow/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow
+
+
+
+## Default Configuration
+
+- Training hyperparameters (single card):
+
+  - EPOCHS 90
+  - LEARNING_RATE 2e-04
+  - img_height = 256
+  - img_width = 256
+  - img_layer = 3
+
+
+## Supported Features
+
+| Feature              | Supported |
+| -------------------- | --------- |
+| Distributed training | No        |
+| Mixed precision      | Yes       |
+| Data parallelism     | No        |
+
+
+## Mixed Precision Training
+
+The Ascend 910 AI processor provides automatic mixed precision: following a built-in optimization strategy, selected float32 operators across the network are automatically reduced to float16, improving system performance and reducing memory usage with very little loss of accuracy.
+
+## Enabling Mixed Precision
+
+Mixed precision is enabled by default:
+
+```
+ config = tf.ConfigProto()
+ custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = "NpuOptimizer"
+ custom_op.parameter_map["use_off_line"].b = True
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+
+```
+
+# Training Environment Setup
+
+- For hardware and runtime environment setup, see the [CANN Software Installation Guide](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-update).
+- Run the following command to install the dependencies.
+```
+pip3 install -r requirements.txt
+```
+Note: the dependency file requirements.txt is located in the model's root directory.
+
+# Quick Start
+
+## Dataset Preparation
+
+1. The model is trained on an open-source dataset found on the web; please obtain the dataset yourself.
+
+OBS download link: [training dataset](https://e-share.obs-website.cn-north-1.myhuaweicloud.com?token=oW2yZMl66FH/ghTU6+mh7RENEwQ1NxF7PFw/Hf9SHP8SR8C3s86LxYBaWwRN+h9pyT+FFiRWHwXGqgXU0mHW+JxW2y+L6d9z2AFzvCQnpYoJB8Rkr6dqSU/LjjQ/NnKH/toWHRIy8HndxNfxkSMPO8kbwGoEfQoCRi9wNPfTmb3Hp4auBaDWJr+CAgziVlxJJvWxHpuatLSGoMVPQkt0ZI51qhOJiih4Dp+Y85RPHTk2pF0Pa3Ab5XTW2Hj/5FRnfZTXrHQuxtdLgB6O39e+GHI6UsAoUkaDqU5ec1VfbnrDTF/XbWz6OzVlHPwxXlGLZ/udVWGtQiGe+ToaGAxujuSf0HQ72fe8U+TaFNMWZRFQtFi7aj6j2jO8rLRtgNiJikAD+FIMlgkAhjbY7cb5W2Xk7HEJxIwRKSPbhjWsUxV02lnMZulxb2Jz214OrTuZf0zal1FHbE6l/dEY9er2zug4Xpw6yu5vNmGYTGDCLnoSiRFSb9r4fhtxv3QNjTUIkWJX//PKu2ubRjc9H3JXUKGFgq37GMrmHV1hB3OEkhtAyq5Kn+nE2DnQBoR0yvSQFo81qHplxyPCLxtn+q6Y35uYe91E53xEwRmURXoSg3Z0hZbfr9E1Js7HStuIsc7TWTvLUv1sBltasjGvHEmakWJQw3bz7kRdCEcJ+2sq4cECSQv/OKMdtYpZCljoer/o)
+Extraction code:
+123456
+
+2. After preprocessing, place the dataset under the model directory and point the training script at its path (see the layout sketch at the end of this subsection).
+
+3. Additional resources
+
+   vgg16.npy: [download link, extraction code 83dt](https://pan.baidu.com/share/init?surl=D4Zoaunwo2rZTNW7HhZjPA)
+
+   face parsing tools: [download link](http://dlib.net/files/)
+
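+Assuming the default paths hard-coded in main.py (`./all/images/non-makeup` and `./all/images/makeup`), the expected layout is:
+
+```
+all/
+└─images/
+  ├─makeup/          # reference photos with makeup (*.jpg)
+  └─non-makeup/      # source photos without makeup (*.jpg)
+```
+
+Note that `Get_file_name` sorts files by the five digits preceding `.jpg`, so every file name must end with a five-digit index (for example `xxx00001.jpg`).
+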
+
+## Model Training
+
+- Click "Download Now" and choose a suitable method to download the source package.
+- Start training.
+
+  - Before launching training, first configure the environment variables required by the program.
+
+    For the environment variable settings, see:
+
+    [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+  - Single-card training
+
+    1. Launch training (the entry point is main.py)
+
+       python3 main.py
+
+    2. Training accuracy results
+
+       |      | Time per epoch (745 iterations) | PSNR     | SSIM     |
+       |------|---------------------------------|----------|----------|
+       | NPU  | 34 min 51 s                     | 14.72197 | 0.711275 |
+       | V100 | 4 min 0 s                       | 16.65672 | 0.780146 |
+
+
+
+    Results on an NVIDIA 2080Ti GPU after 180 epochs
+    
+
+    NPU, 750 image pairs, after 70 epochs
+    
+
+    GPU V100, 750 image pairs, after 70 epochs:
+    
+
+
+
+# Advanced Reference
+
+## Scripts and Sample Code
+
+```
+PairedCycleGAN
+└─
+  ├─README.md
+  ├─layers.py
+  ├─main.py                                    entry point
+  ├─model.py                                   generator and discriminator networks
+  ├─utils.py
+  ├─vgg16.py
+  ├─preTrainedModel                            pretrained external models providing required functionality
+    ├─shape_predictor_68_face_landmarks.dat    face landmark detector
+    ├─vgg16.npy
+```
+
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/images/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/images/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/images/makeup/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/images/makeup/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/images/non-makeup/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/all/images/non-makeup/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/layers.py b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..c284ef97dcd62ef23bfedde3601f5b53487376d7
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/layers.py
@@ -0,0 +1,160 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import tensorflow as tf
+
+
+def instance_norm(x):
+ with tf.variable_scope("instance_norm"):
+ epsilon = 1e-5
+ mean,var = tf.nn.moments(x,[1,2],keep_dims=True)
+ scale = tf.get_variable(name="scale",shape=[x.get_shape()[-1]],
+ initializer=tf.truncated_normal_initializer(stddev=0.02,mean=1.0))
+ offset = tf.get_variable(name="offset",shape=[x.get_shape()[-1]],
+ initializer=tf.constant_initializer(0.0))
+ out = scale*tf.div(x-mean,tf.sqrt(var+epsilon))+offset
+ return out
+
+
+def lrelu(x,leak=0.2,name="lrelu",alt_relu_impl=True):
+ with tf.variable_scope(name):
+ if alt_relu_impl:
+ f1 = 0.5*(1+leak)
+ f2 = 0.5*(1-leak)
+ return f1*x+f2*abs(x)
+ else:
+ return tf.maximum(x,leak*x)
+
+
+def spectral_norm(x, iteration=1):
+ """
+ following taki0112's implement
+ :param x:
+ :param iteration:
+ :return:
+ """
+ with tf.variable_scope("spectral_norm"):
+ x_shape = x.shape.as_list()
+ w = tf.reshape(x, [-1, x_shape[-1]])
+ u = tf.get_variable("u", [1, x_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
+ u_hat = u
+ v_hat = None
+
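+        # Power iteration: estimate the largest singular value (sigma) of w.
+        # One step per call is usually enough since u is persisted between calls.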
+ for i in range(iteration):
+ v_ = tf.matmul(u_hat, tf.transpose(w))
+ v_hat = tf.nn.l2_normalize(v_, dim=None)
+ u_ = tf.matmul(v_hat, w)
+ u_hat = tf.nn.l2_normalize(u_, dim=None)
+ u_hat = tf.stop_gradient(u_hat)
+ v_hat = tf.stop_gradient(v_hat)
+
+ sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
+
+ with tf.control_dependencies([u.assign(u_hat)]):
+ w_norm = w / sigma
+ w_norm = tf.reshape(w_norm, [-1]+x_shape[1:])
+ return w_norm
+
+
+def generate_conv2d(inputconv, o_d=64, kernal_size=7, stride=1,
+                    padding="VALID", name="conv2d", stddev=0.02,
+                    do_relu=True, do_norm=True, do_sp_norm=False, relufactor=0.2
+                    ):
+    with tf.variable_scope(name):
+        # tf.layers.conv2d replaces the original tf.contrib.layers.conv2d call.
+        conv = tf.layers.conv2d(
+            inputs=inputconv,
+            filters=o_d,
+            kernel_size=kernal_size,
+            strides=stride,
+            padding=padding,  # VALID: no padding; SAME: zero-pad to keep size
+            activation=None,
+            kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
+            bias_initializer=tf.constant_initializer(0.0)
+        )
+
+ if do_norm:
+ conv = instance_norm(conv)
+
+ if do_sp_norm:
+ conv = spectral_norm(conv)
+
+ if do_relu:
+ if relufactor!=0:
+ conv = lrelu(conv,relufactor,"lrelu")
+ else:
+ conv = tf.nn.relu(conv,name="relu")
+
+ return conv
+
+
+def generate_deconv2d(inputdeconv, o_d=64, kernal_size=7, stride=1, padding="VALID", name="deconv2d",
+                      stddev=0.02, do_relu=True, do_norm=True, do_sp_norm=False, relufactor=0.2):
+    with tf.variable_scope(name):
+        # tf.layers.conv2d_transpose replaces the original tf.contrib.layers.conv2d_transpose call.
+        deconv = tf.layers.conv2d_transpose(
+            inputs=inputdeconv,
+            filters=o_d,
+            kernel_size=kernal_size,
+            strides=stride,
+            padding=padding,  # VALID: no padding; SAME: zero-pad to keep size
+            activation=None,
+            kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
+            bias_initializer=tf.constant_initializer(0.0)
+        )
+ if do_norm:
+ deconv = instance_norm(deconv)
+
+ if do_sp_norm:
+ deconv = spectral_norm(deconv)
+
+ if do_relu:
+ if relufactor!=0:
+ deconv = lrelu(deconv,relufactor,name="lrelu")
+ else:
+ deconv = tf.nn.relu(deconv,name="relu")
+ return deconv
+
+
+def generate_resblock(input_res,dim,name="resnet"):
+ with tf.variable_scope(name):
+ out_res = tf.pad(input_res,[[0,0],[1,1],[1,1],[0,0]],"REFLECT")
+ out_res = generate_conv2d(inputconv=out_res,o_d=dim,kernal_size=3,stride=1,padding="VALID",name="c1")
+ out_res = tf.pad(out_res,[[0,0],[1,1],[1,1],[0,0]],"REFLECT")
+ out_res = generate_conv2d(out_res,dim,3,1,"VALID","c2",do_relu=False)
+ return tf.nn.relu(input_res+out_res)
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/main.py b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..83555e4eaaa87a4cbf437ece15d4d2c2ec32a6d2
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/main.py
@@ -0,0 +1,701 @@
+
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from npu_bridge.npu_init import *
+import tensorflow as tf
+# from tensorflow.python.client import device_lib
+# print(device_lib.list_local_devices())
+import pickle
+import dlib
+import cv2
+import numpy as np
+# from scipy.misc import imsave
+from imageio import imsave
+import os
+import time
+import random
+from model import *
+import vgg16
+import utils
+
+Learning_rate=0.0002
+batch_size = 1
+read_batch_size = 1
+# gpu_num = 2
+gpu_num = 1
+# max_images = 1050
+max_images = 700
+pool_size = 50
+train_num = 90
+
+img_height = 256
+img_width = 256
+img_layer = 3
+
+to_restore = False
+
+save_training_images = True
+to_train = True
+to_test = False
+out_path = "./output"
+check_dir = "./output/makeup/checkpoints/"
+load_dir = "imgs.txt"
+
+
+class PairedCycleGAN():
+
+    # Decode a JPEG file into a [-1, 1] float image resized to 256x256.
+ def _parse_function(self, filename):
+ image_string = tf.read_file(filename)
+ image_decoded = tf.image.decode_jpeg(image_string)
+ img_resized = tf.image.resize_images(image_decoded, [256, 256])
+ img_dived = tf.div(img_resized, 127.5)
+ img_subtracted = tf.subtract(img_dived, 1)
+ return img_subtracted
+
+ def Get_file_name(self, file_dir):
+ L = []
+ for root, dirs, files in os.walk(file_dir):
+ for file in files:
+                if os.path.splitext(file)[1] == '.jpg':  # keep only .jpg files
+                    L.append(os.path.join(root, file).replace('\\', '/'))
+        # Sort by the five-digit index at the end of each file name.
+        L.sort(key=lambda x: int(x[-9:-4]))
+        print(L)
+ return L
+
+    def input_setup(self, sess):
+        """
+        Build the two input pipelines and fetch the first batch.
+        dataset_A: non-makeup images
+        dataset_B: makeup images
+        """
+
+ filename_A_sequence = self.Get_file_name("./all/images/non-makeup")
+ filename_B_sequence = self.Get_file_name("./all/images/makeup")
+ dataset_A = tf.data.Dataset.from_tensor_slices((filename_A_sequence))
+ dataset_B = tf.data.Dataset.from_tensor_slices((filename_B_sequence))
+
+ dataset_A = dataset_A.map(self._parse_function)
+ dataset_B = dataset_B.map(self._parse_function)
+
+ dataset_A = dataset_A.batch(read_batch_size, drop_remainder=True)
+ dataset_B = dataset_B.batch(read_batch_size, drop_remainder=True)
+
+ iterator_A = dataset_A.make_one_shot_iterator()
+ self.img_name_A = iterator_A.get_next()
+ iterator_B = dataset_B.make_one_shot_iterator()
+ self.img_name_B = iterator_B.get_next()
+
+ self.image_file_A = sess.run([self.img_name_A])
+ self.image_file_B = sess.run([self.img_name_B])
+
+ def get_mask(self, input_face, detector, predictor, window=5):
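+        # Build binary masks from dlib's 68 facial landmarks: a lip mask from
+        # points 48-67, eye-region masks from points 36-47, and a face mask
+        # that blanks out brows, eyes and lips. Implicitly returns None when
+        # no face is detected.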
+ gray = cv2.cvtColor(input_face, cv2.COLOR_BGR2GRAY)
+ dets = detector(gray, 1)
+
+ for face in dets:
+ shape = predictor(input_face, face)
+ temp = []
+ for pt in shape.parts():
+ temp.append([pt.x, pt.y])
+ lip_mask = np.zeros([256, 256])
+ eye_mask = np.zeros([256, 256])
+ face_mask = np.full((256, 256), 255).astype(np.uint8)
+ cv2.fillPoly(lip_mask, [np.array(temp[48:60]).reshape((-1, 1, 2))], (255, 255, 255))
+ cv2.fillPoly(lip_mask, [np.array(temp[60:68]).reshape((-1, 1, 2))], (0, 0, 0))
+
+ left_left = min(x[0] for x in temp[36:42])
+ left_right = max(x[0] for x in temp[36:42])
+ left_bottom = min(x[1] for x in temp[36:42])
+ left_top = max(x[1] for x in temp[36:42])
+ left_rectangle = np.array(
+ [[left_left - window, left_top + window], [left_right + window, left_top + window],
+ [left_right + window, left_bottom - window], [left_left - window, left_bottom - window]]
+ ).reshape((-1, 1, 2))
+ cv2.fillPoly(eye_mask, [left_rectangle], (255, 255, 255))
+ cv2.fillPoly(eye_mask, [np.array(temp[36:42]).reshape((-1, 1, 2))], (0, 0, 0))
+
+ right_left = min(x[0] for x in temp[42:48])
+ right_right = max(x[0] for x in temp[42:48])
+ right_bottom = min(x[1] for x in temp[42:48])
+ right_top = max(x[1] for x in temp[42:48])
+ right_rectangle = np.array(
+ [[right_left - window, right_top + window], [right_right + window, right_top + window],
+ [right_right + window, right_bottom - window], [right_left - window, right_bottom - window]]
+ ).reshape((-1, 1, 2))
+ cv2.fillPoly(eye_mask, [right_rectangle], (255, 255, 255))
+ cv2.fillPoly(eye_mask, [np.array(temp[42:47]).reshape((-1, 1, 2))], (0, 0, 0))
+
+ cv2.polylines(face_mask, [np.array(temp[17:22]).reshape(-1, 1, 2)], False, (0, 0, 0), 7)
+ cv2.polylines(face_mask, [np.array(temp[22:27]).reshape(-1, 1, 2)], False, (0, 0, 0), 7)
+ cv2.fillPoly(face_mask, [np.array(temp[36:42]).reshape((-1, 1, 2))], (0, 0, 0))
+ cv2.fillPoly(face_mask, [np.array(temp[42:48]).reshape((-1, 1, 2))], (0, 0, 0))
+ cv2.fillPoly(face_mask, [np.array(temp[48:60]).reshape((-1, 1, 2))], (0, 0, 0))
+ return lip_mask, eye_mask, face_mask
+
+ def input_read(self, sess):
+
+ self.fake_images_A = np.zeros((pool_size, 1, img_height, img_width, img_layer))
+ self.fake_images_B = np.zeros((pool_size, 1, img_height, img_width, img_layer))
+
+ self.A_input = np.zeros((max_images, batch_size, img_height, img_width, img_layer))
+ self.B_input = np.zeros((max_images, batch_size, img_height, img_width, img_layer))
+ self.A_input_mask = np.zeros((max_images, 3, img_height, img_width))
+ self.B_input_mask = np.zeros((max_images, 3, img_height, img_width))
+
+ if not os.path.exists(load_dir):
+            cur_A = 0
+            for i in range(int(max_images / read_batch_size)):
+                if i != 0:
+                    try:
+                        # Pull the next batch from iterator A.
+                        self.image_file_A = sess.run([self.img_name_A])
+                    except tf.errors.OutOfRangeError:
+                        print("iterator A done")
+
+                batch_tensor = np.array(self.image_file_A)
+                for j in range(read_batch_size):
+                    image_tensor = batch_tensor[0][j]
+                    if image_tensor.size == img_width * img_height * img_layer:
+                        temp = ((image_tensor + 1) * 127.5).astype(np.uint8)
+                        res = self.get_mask(temp, self.detector, self.predictor)
+                        if res is not None:
+                            self.A_input[cur_A] = image_tensor.reshape((batch_size, img_height, img_width, img_layer))
+                            self.A_input_mask[cur_A][0] = np.equal(res[0], 255)
+                            self.A_input_mask[cur_A][1] = np.equal(res[1], 255)
+                            self.A_input_mask[cur_A][2] = np.equal(res[2], 255)
+                            cur_A += 1
+                        else:
+                            print("no face detected in image {}".format(cur_A + 1))
+            print("cur_A:{}".format(cur_A))
+
+            cur_B = 0
+            for i in range(int(max_images / read_batch_size)):
+                if i != 0:
+                    try:
+                        # Pull the next batch from iterator B.
+                        self.image_file_B = sess.run([self.img_name_B])
+                    except tf.errors.OutOfRangeError:
+                        print("iterator B done")
+
+                batch_tensor = np.array(self.image_file_B)
+                for j in range(read_batch_size):
+                    image_tensor = batch_tensor[0][j]
+                    if image_tensor.size == img_width * img_height * img_layer:
+                        temp = ((image_tensor + 1) * 127.5).astype(np.uint8)
+                        res = self.get_mask(temp, self.detector, self.predictor)
+                        if res is not None:
+                            self.B_input[cur_B] = image_tensor.reshape((batch_size, img_height, img_width, img_layer))
+                            self.B_input_mask[cur_B][0] = np.equal(res[0], 255)
+                            self.B_input_mask[cur_B][1] = np.equal(res[1], 255)
+                            self.B_input_mask[cur_B][2] = np.equal(res[2], 255)
+                            cur_B += 1
+                        else:
+                            print("no face detected in image {}".format(cur_B + 1))
+            print("cur_B:{}".format(cur_B))
+
+            # Cache decoded images and masks so later runs can skip decoding.
+ fw = open(load_dir, "wb")
+ pickle.dump(self.A_input, fw)
+ pickle.dump(self.B_input, fw)
+ pickle.dump(self.A_input_mask, fw)
+ pickle.dump(self.B_input_mask, fw)
+ pickle.dump(cur_A, fw)
+ pickle.dump(cur_B, fw)
+
+ else:
+ fr = open(load_dir, "rb")
+ self.A_input = pickle.load(fr)
+ self.B_input = pickle.load(fr)
+ self.A_input_mask = pickle.load(fr)
+ self.B_input_mask = pickle.load(fr)
+ cur_A = pickle.load(fr)
+ cur_B = pickle.load(fr)
+
+ self.train_num = min(cur_A, cur_B)
+ print("68 benchmark face number: ", self.train_num)
+
+
+ def average_gradients(self, tower_grads):
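+        # Average gradients across GPU towers (multi-GPU leftover; unused in
+        # the single-card NPU training path).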
+ average_grads = []
+ for grad_and_vars in zip(*tower_grads):
+ grads = []
+ for g, _ in grad_and_vars:
+ expend_g = tf.expand_dims(g, 0)
+ grads.append(expend_g)
+ grad = tf.concat(grads, 0)
+ grad = tf.reduce_mean(grad, 0)
+ v = grad_and_vars[0][1]
+ grad_and_var = (grad, v)
+ average_grads.append(grad_and_var)
+ return average_grads
+
+ def model_setup(self):
+ self.input_A = tf.placeholder(tf.float32, [batch_size, img_height, img_width, img_layer], name="input_A")
+ self.input_B = tf.placeholder(tf.float32, [batch_size, img_height, img_width, img_layer], name="input_B")
+
+ self.input_A_mask = tf.placeholder(tf.bool, [3, img_height, img_width], name="input_A_mask")
+ self.input_B_mask = tf.placeholder(tf.bool, [3, img_height, img_width], name="input_B_mask")
+
+ self.fake_pool_A = tf.placeholder(tf.float32, [None, img_height, img_width, img_layer], name="fake_pool_A")
+ self.fake_pool_B = tf.placeholder(tf.float32, [None, img_height, img_width, img_layer], name="fake_pool_B")
+
+ self.num_fake_inputs = 0
+ self.global_step = tf.Variable(0, name="global_step", trainable=False)
+ self.lr = tf.placeholder(tf.float32, shape=[], name="lr")
+ self.predictor = dlib.shape_predictor("./preTrainedModel/shape_predictor_68_face_landmarks.dat")
+ self.detector = dlib.get_frontal_face_detector()
+
+ # using single gpu
+ with tf.variable_scope("Model") as scope:
+ self.fake_B, self.fake_A = build_generator(self.input_A, self.input_B, name="generator")
+ self.rec_A = generate_discriminator(self.input_A, "d_A")
+ self.rec_B = generate_discriminator(self.input_B, "d_B")
+
+ scope.reuse_variables()
+
+ self.fake_rec_A = generate_discriminator(self.fake_A, "d_A")
+ self.fake_rec_B = generate_discriminator(self.fake_B, "d_B")
+ self.cyc_A, self.cyc_B = build_generator(self.fake_B, self.fake_A, name="generator")
+
+ scope.reuse_variables()
+
+ self.fake_pool_rec_A = generate_discriminator(self.fake_pool_A, "d_A")
+ self.fake_pool_rec_B = generate_discriminator(self.fake_pool_B, "d_B")
+
+ self.perc_A = tf.cast(tf.image.resize_images((self.input_A + 1) * 127.5, [224, 224]), tf.float32)
+ self.perc_B = tf.cast(tf.image.resize_images((self.input_B + 1) * 127.5, [224, 224]), tf.float32)
+ self.perc_fake_B = tf.cast(tf.image.resize_images((self.fake_B + 1) * 127.5, [224, 224]), tf.float32)
+ self.perc_fake_A = tf.cast(tf.image.resize_images((self.fake_A + 1) * 127.5, [224, 224]), tf.float32)
+ self.perc = self.perc_loss_cal(
+ tf.concat([self.perc_A, self.perc_B, self.perc_fake_B, self.perc_fake_A], axis=0))
+ percep_norm, var = tf.nn.moments(self.perc, [1, 2], keep_dims=True)
+ self.perc = tf.divide(self.perc, tf.add(percep_norm, 1e-5))
+
+ def perc_loss_cal(self, input_tensor):
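+        # Perceptual features: conv4_1 activations of a pretrained VGG16.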
+ vgg = vgg16.Vgg16("./preTrainedModel/vgg16.npy")
+ vgg.build(input_tensor)
+ return vgg.conv4_1
+
+ def mrf_loss_cal(self, source, template, ks):
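+        # MRF (patch-matching) loss: for each ks x ks source patch, find the
+        # most similar template patch (cosine similarity via convolution) and
+        # penalize the squared difference to that best match.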
+ temp = tf.extract_image_patches(source, ksizes=[1, ks, ks, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1],
+ padding="VALID")
+ temp_shape = temp.get_shape().as_list()
+ source_image_patch = tf.nn.l2_normalize(temp, dim=[3])
+
+ template_image_patch = tf.extract_image_patches(template, ksizes=[1, ks, ks, 1], strides=[1, 1, 1, 1],
+ rates=[1, 1, 1, 1], padding="VALID")
+ template_image_patch = tf.nn.l2_normalize(template_image_patch, dim=[3])
+
+ shape = source_image_patch.get_shape().as_list()
+ height = shape[1]
+ width = shape[2]
+ depth = shape[3]
+
+ source_image_patch = tf.reshape(source_image_patch, [height * width, ks, ks, depth // ks // ks])
+ template_image_patch = tf.reshape(template_image_patch, [height * width, ks, ks, depth // ks // ks])
+ temp_reshape = tf.reshape(temp, [height * width, ks, ks, depth // ks // ks])
+
+ template_image_patch = tf.transpose(template_image_patch, perm=[1, 2, 3, 0])
+ convs = tf.nn.conv2d(source_image_patch, template_image_patch, strides=[1, 1, 1, 1], padding="VALID")
+ argmax = tf.argmax(convs, dimension=3)
+
+ best_match = tf.gather(temp_reshape, indices=argmax)
+ best_match = tf.reshape(best_match, shape=temp_shape)
+
+ loss = tf.divide(tf.reduce_mean(tf.squared_difference(best_match, temp)), ks ** 2)
+ return loss
+
+ def histogram_loss_cal(self, source, template, source_mask, template_mask):
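+        # Histogram-matching loss within the masked region: map source pixels
+        # onto the template's distribution by matching CDF quantiles, then
+        # take the MSE between the source and its matched version.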
+ shape = tf.shape(source)
+ source = tf.reshape(source, [1, -1])
+ template = tf.reshape(template, [1, -1])
+ source_mask = tf.reshape(source_mask, [-1, 256 * 256])
+ template_mask = tf.reshape(template_mask, [-1, 256 * 256])
+
+ source = tf.boolean_mask(source, source_mask)
+ template = tf.boolean_mask(template, template_mask)
+
+ his_bins = 255
+
+ max_value = tf.reduce_max([tf.reduce_max(source), tf.reduce_max(template)])
+ min_value = tf.reduce_min([tf.reduce_min(source), tf.reduce_min(template)])
+
+ hist_delta = (max_value - min_value) / his_bins
+ hist_range = tf.range(min_value, max_value, hist_delta)
+ hist_range = tf.add(hist_range, tf.divide(hist_delta, 2))
+
+ s_hist = tf.histogram_fixed_width(source, [min_value, max_value], his_bins, dtype=tf.int32)
+ t_hist = tf.histogram_fixed_width(template, [min_value, max_value], his_bins, dtype=tf.int32)
+
+ s_quantiles = tf.cumsum(s_hist)
+ s_last_element = tf.subtract(tf.size(s_quantiles), tf.constant(1))
+ s_quantiles = tf.divide(s_quantiles, tf.gather(s_quantiles, s_last_element))
+
+ t_quantiles = tf.cumsum(t_hist)
+ t_last_element = tf.subtract(tf.size(t_quantiles), tf.constant(1))
+ t_quantiles = tf.divide(t_quantiles, tf.gather(t_quantiles, t_last_element))
+
+ nearest_indices = tf.map_fn(lambda x: tf.argmin(tf.abs(tf.subtract(t_quantiles, x))), s_quantiles,
+ dtype=tf.int64)
+ s_bin_index = tf.to_int64(tf.divide(source, hist_delta))
+ s_bin_index = tf.clip_by_value(s_bin_index, 0, 254)
+
+ matched_to_t = tf.gather(hist_range, tf.gather(nearest_indices, s_bin_index))
+        # Same normalization as in Gatys' style transfer. The normalization
+        # scalar varies a lot from image to image; a normalization that
+        # includes a variation constraint might work better.
+ matched_to_t = tf.subtract(tf.div(matched_to_t, 127.5), 1)
+ source = tf.subtract(tf.divide(source, 127.5), 1)
+ return tf.reduce_mean(tf.squared_difference(matched_to_t, source))
+
+ def loss_cal(self):
+ cyc_loss = tf.reduce_mean(tf.abs(self.input_A - self.cyc_A)) + tf.reduce_mean(tf.abs(self.input_B - self.cyc_B))
+ self.cycle_loss = cyc_loss
+ disc_loss_A = tf.reduce_mean(tf.squared_difference(self.fake_rec_A, 1))
+ disc_loss_B = tf.reduce_mean(tf.squared_difference(self.fake_rec_B, 1))
+
+ temp_source = tf.cast((self.fake_B[0, :, :, 0] + 1) * 127.5, dtype=tf.float32)
+ temp_template = tf.cast((self.input_B[0, :, :, 0] + 1) * 127.5, dtype=tf.float32)
+ histogram_loss_r_lip = self.histogram_loss_cal(temp_source, temp_template, self.input_A_mask[0],
+ self.input_B_mask[0])
+ histogram_loss_r_eye = self.histogram_loss_cal(temp_source, temp_template, self.input_A_mask[1],
+ self.input_B_mask[1])
+ # histogram_loss_r_face = self.histogram_loss_cal(temp_source,temp_template, self.input_A_mask[2],self.input_B_mask[2])
+ histogram_loss_r = histogram_loss_r_lip + histogram_loss_r_eye
+
+ temp_source = tf.cast((self.fake_B[0, :, :, 1] + 1) * 127.5, dtype=tf.float32)
+ temp_template = tf.cast((self.input_B[0, :, :, 1] + 1) * 127.5, dtype=tf.float32)
+ histogram_loss_g_lip = self.histogram_loss_cal(temp_source, temp_template, self.input_A_mask[0],
+ self.input_B_mask[0])
+ histogram_loss_g_eye = self.histogram_loss_cal(temp_source, temp_template, self.input_A_mask[1],
+ self.input_B_mask[1])
+ # histogram_loss_g_face = self.histogram_loss_cal(temp_source,temp_template,self.input_A_mask[2],self.input_B_mask[2])
+ histogram_loss_g = histogram_loss_g_lip + histogram_loss_g_eye
+
+ temp_source = tf.cast((self.fake_B[0, :, :, 2] + 1) * 127.5, dtype=tf.float32)
+ temp_template = tf.cast((self.input_B[0, :, :, 2] + 1) * 127.5, dtype=tf.float32)
+ histogram_loss_b_lip = self.histogram_loss_cal(temp_source, temp_template, self.input_A_mask[0],
+ self.input_B_mask[0])
+ histogram_loss_b_eye = self.histogram_loss_cal(temp_source, temp_template, self.input_A_mask[1],
+ self.input_B_mask[1])
+ # histogram_loss_b_face = self.histogram_loss_cal(temp_source,temp_template,self.input_A_mask[2],self.input_B_mask[2])
+ histogram_loss_b = histogram_loss_b_lip + histogram_loss_b_eye
+
+ makeup_loss = histogram_loss_r + histogram_loss_g + histogram_loss_b
+
+        # Same normalization as Gatys' neural style transfer.
+        # The perceptual-loss weight was increased from 0.005 to 0.05.
+ perceptual_loss = tf.reduce_mean(tf.squared_difference(self.perc[0], self.perc[2])) + tf.reduce_mean(
+ tf.squared_difference(self.perc[1], self.perc[3]))
+
+ # tv_loss = tf.image.total_variation(self.fake_B)
+
+        g_loss = cyc_loss * 20 + disc_loss_B + disc_loss_A + perceptual_loss * 0.05 + makeup_loss * 0.5
+        self.total_loss = g_loss
+
+ d_loss_A = (tf.reduce_mean(tf.square(self.fake_pool_rec_A)) + tf.reduce_mean(
+ tf.squared_difference(self.rec_A, 1))) / 2.0
+ d_loss_B = (tf.reduce_mean(tf.square(self.fake_pool_rec_B)) + tf.reduce_mean(
+ tf.squared_difference(self.rec_B, 1))) / 2.0
+
+ #optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5)
+ #===========Loss Scale========================================
+ optimizer_tmp=tf.train.AdamOptimizer(self.lr, beta1=0.5)
+ loss_scale_manager = ExponentialUpdateLossScaleManager(init_loss_scale=2**32, incr_every_n_steps=1000, decr_every_n_nan_or_inf=2, decr_ratio=0.5)
+ optimizer = NPULossScaleOptimizer(optimizer_tmp, loss_scale_manager)
+ #===========Loss Scale========================================
+ self.model_vars = tf.trainable_variables()
+ d_A_vars = [var for var in self.model_vars if "d_A" in var.name]
+ d_B_vars = [var for var in self.model_vars if "d_B" in var.name]
+ g_vars = [var for var in self.model_vars if "generator" in var.name]
+
+ self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)
+ self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)
+ self.g_trainer = optimizer.minimize(g_loss, var_list=g_vars)
+
+ for var in self.model_vars:
+ print(var.name)
+
+ self.disc_A_loss_sum = tf.summary.scalar("disc_loss_A", disc_loss_A)
+ self.disc_B_loss_sum = tf.summary.scalar("disc_loss_B", disc_loss_B)
+ self.cyc_loss_sum = tf.summary.scalar("cyc_loss", cyc_loss)
+ self.makeup_loss_sum = tf.summary.scalar("makeup_loss", makeup_loss)
+ self.percep_loss_sum = tf.summary.scalar("perceptual_loss", perceptual_loss)
+ self.g_loss_sum = tf.summary.scalar("g_loss", g_loss)
+ # self.tv_loss_sum = tf.summary.scalar("tv_loss",tv_loss)
+
+ self.g_summary = tf.summary.merge([
+ self.disc_A_loss_sum, self.disc_B_loss_sum, self.cyc_loss_sum, self.makeup_loss_sum, self.percep_loss_sum,
+ self.g_loss_sum,
+ ], "g_summary")
+
+ self.d_A_loss_sum = tf.summary.scalar("d_A_loss", d_loss_A)
+ self.d_B_loss_sum = tf.summary.scalar("d_B_loss", d_loss_B)
+
+ def save_training_images(self, sess, epoch):
+ if not os.path.exists("./output/makeup/output_imgs"):
+ os.makedirs("./output/makeup/output_imgs")
+ if not os.path.exists("./output/makeup/cyc_imgs"):
+ os.makedirs("./output/makeup/cyc_imgs")
+ for i in range(0, max_images):
+ fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = sess.run(
+ [self.fake_A, self.fake_B, self.cyc_A, self.cyc_B], feed_dict={
+ self.input_A: self.A_input[i],
+ self.input_B: self.B_input[i]
+ })
+ imsave("./output/makeup/output_imgs/fakeA_" + str(epoch) + "_" + str(i) + ".jpg",
+ ((fake_A_temp[0] + 1) * 127.5).astype(np.uint8))
+ imsave("./output/makeup/output_imgs/fakeB_" + str(epoch) + "_" + str(i) + ".jpg",
+ ((fake_B_temp[0] + 1) * 127.5).astype(np.uint8))
+ imsave("./output/makeup/cyc_imgs/cycA_" + str(epoch) + "_" + str(i) + ".jpg",
+ ((cyc_A_temp[0] + 1) * 127.5).astype(np.uint8))
+ imsave("./output/makeup/cyc_imgs/cycB_" + str(epoch) + "_" + str(i) + ".jpg",
+ ((cyc_B_temp[0] + 1) * 127.5).astype(np.uint8))
+
+ def fake_image_pool(self, num_fakes, fake, fake_pool):
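+        # History buffer of generated images (CycleGAN trick): with
+        # probability 0.5 the discriminator is shown a previously generated
+        # image instead of the newest one, which stabilizes training.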
+ if num_fakes < pool_size:
+ fake_pool[num_fakes] = fake
+ return fake
+ else:
+ p = random.random()
+ if p > 0.5:
+ random_id = random.randint(0, pool_size - 1)
+ temp = fake_pool[random_id]
+ fake_pool[random_id] = fake
+ return temp
+ else:
+ return fake
+
+ def train(self):
+ self.model_setup()
+ self.loss_cal()
+
+        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
+        saver = tf.train.Saver()
+
+
+        # =============================================================================================
+        # NPU session configuration
+        config = tf.ConfigProto()
+        custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+        custom_op.name = "NpuOptimizer"
+        custom_op.parameter_map["use_off_line"].b = True
+        custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")  # mixed precision
+        config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # required on NPU
+        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF  # required on NPU
+        # =============================================================================================
+        with tf.Session(config=config) as sess:
+            sess.run(init_op)
+ self.input_setup(sess)
+ self.input_read(sess)
+
+ if to_restore:
+ chkpt_fname = tf.train.latest_checkpoint(check_dir)
+ saver.restore(sess, chkpt_fname)
+
+ writer = tf.summary.FileWriter("./output/makeup/2")
+
+ if not os.path.exists(check_dir):
+ os.makedirs(check_dir)
+
+ for epoch in range(sess.run(self.global_step), train_num):
+ print("in the epoch ", epoch)
+ saver.save(sess, os.path.join(check_dir, "PairedCycleGAN"), global_step=epoch)
+
+ if epoch < 100:
+ curr_lr = Learning_rate
+ else:
+ curr_lr = Learning_rate - Learning_rate * (epoch - 100) / train_num
+
+ if save_training_images:
+ self.save_training_images(sess, epoch)
+
+ for ptr in range(0, self.train_num):
+ print("in the iteration", ptr)
+ print(time.ctime())
+ _, fake_B_temp, fake_A_temp, summary_str = sess.run(
+ [self.g_trainer, self.fake_B, self.fake_A, self.g_summary], feed_dict={
+ self.input_A: self.A_input[ptr],
+ self.input_B: self.B_input[ptr],
+ self.lr: curr_lr,
+ self.input_A_mask: self.A_input_mask[ptr],
+ self.input_B_mask: self.B_input_mask[ptr],
+ })
+ writer.add_summary(summary_str, epoch * self.train_num + ptr)
+ totalLoss = sess.run(self.total_loss, feed_dict={
+ self.input_A: self.A_input[ptr],
+ self.input_B: self.B_input[ptr],
+ self.lr: curr_lr,
+ self.input_A_mask: self.A_input_mask[ptr],
+ self.input_B_mask: self.B_input_mask[ptr],
+ })
+ cycleLoss = sess.run(self.cycle_loss, feed_dict={
+ self.input_A: self.A_input[ptr],
+ self.input_B: self.B_input[ptr],
+ self.lr: curr_lr,
+ self.input_A_mask: self.A_input_mask[ptr],
+ self.input_B_mask: self.B_input_mask[ptr],
+ })
+ print("totalLoss is :", totalLoss)
+ print("CycleLoss is :", cycleLoss)
+ fake_A_temp1 = self.fake_image_pool(self.num_fake_inputs, fake_A_temp, self.fake_images_A)
+ fake_B_temp1 = self.fake_image_pool(self.num_fake_inputs, fake_B_temp, self.fake_images_B)
+
+ _, summary_str = sess.run([self.d_A_trainer, self.d_A_loss_sum], feed_dict={
+ self.input_A: self.A_input[ptr],
+ self.input_B: self.B_input[ptr],
+ self.lr: curr_lr,
+ self.fake_pool_A: fake_A_temp1
+ })
+ writer.add_summary(summary_str, epoch * self.train_num + ptr)
+
+ _, summary_str = sess.run([self.d_B_trainer, self.d_B_loss_sum], feed_dict={
+ self.input_A: self.A_input[ptr],
+ self.input_B: self.B_input[ptr],
+ self.lr: curr_lr,
+ self.fake_pool_B: fake_B_temp1
+ })
+ writer.add_summary(summary_str, epoch * self.train_num + ptr)
+
+ self.num_fake_inputs += 1
+ sess.run(tf.assign(self.global_step, epoch + 1))
+ writer.add_graph(sess.graph)
+
+ def test(self):
+ print("Testing the results")
+ self.model_setup()
+        init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
+
+ saver = tf.train.Saver()
+
+        # =============================================================================================
+        # NPU session configuration
+        config = tf.ConfigProto()
+        custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
+        custom_op.name = "NpuOptimizer"
+        config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # required on NPU
+        config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF  # required on NPU
+        # =============================================================================================
+ with tf.compat.v1.Session(config=npu_config_proto(config_proto=config)) as sess:
+ sess.run(init_op)
+ self.input_setup(sess)
+ self.input_read(sess)
+            chkpt_fname = tf.train.latest_checkpoint(check_dir)
+            saver.restore(sess, chkpt_fname)
+
+ if not os.path.exists("./output/makeup/test_imgs/makeup"):
+ os.makedirs("./output/makeup/test_imgs/makeup")
+ if not os.path.exists("./output/makeup/test_imgs/fake_A"):
+ os.makedirs("./output/makeup/test_imgs/fake_A")
+
+ for i in range(self.train_num):
+
+ # for single gpu
+ fake_A_temp, fake_B_temp = sess.run([self.fake_A, self.fake_B], feed_dict={
+ self.input_A: self.A_input[i],
+ self.input_B: self.B_input[i]
+ })
+ imsave("./output/makeup/test_imgs/fake_A/fakeA_" + str(i) + ".jpg",
+ ((fake_A_temp[0] + 1) * 127.5).astype(np.uint8))
+ imsave("./output/makeup/test_imgs/makeup/fakeB_" + str(i) + ".jpg",
+ ((fake_B_temp[0] + 1) * 127.5).astype(np.uint8))
+
+
+def main():
+ model = PairedCycleGAN()
+ if to_train:
+ model.train()
+ elif to_test:
+ model.test()
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/model.py b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..21b40a19f69b49274d13fadab4632cc2bbced985
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/model.py
@@ -0,0 +1,103 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import tensorflow as tf
+
+from layers import *
+
+ngf = 32
+ndf = 64
+
+
+def build_generator(input_A,input_B,name="generator"):
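+    # Dual-branch generator: separate encoders for the A (no-makeup) and B
+    # (makeup) inputs, nine residual blocks over the concatenated features,
+    # then the features are split back into two decoder branches that emit
+    # the two translated images.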
+ with tf.variable_scope(name):
+ ks = 3
+ fs = 7
+ input_pad_A = tf.pad(input_A,[[0,0],[3,3],[3,3],[0,0]],"REFLECT")
+ input_pad_B = tf.pad(input_B,[[0,0],[3,3],[3,3],[0,0]],"REFLECT")
+
+ A_c1 = generate_conv2d(inputconv=input_pad_A,o_d=ngf,kernal_size=fs,stride=1,padding="VALID",
+ name="A_c1",stddev=0.02) # 1*256*256*32
+ B_c1 = generate_conv2d(inputconv=input_pad_B,o_d=ngf,kernal_size=fs,stride=1,padding="VALID",
+ name="B_c1",stddev=0.02) # 1*256*256*32
+ A_c2 = generate_conv2d(inputconv=A_c1,o_d=ngf*2,kernal_size=ks,stride=2,padding="SAME",
+ name="A_c2",stddev=0.02) # 1*128*128*64
+ B_c2 = generate_conv2d(inputconv=B_c1,o_d=ngf*2,kernal_size=ks,stride=2,padding="SAME",
+ name="B_c2",stddev=0.02) # 1*128*128*64
+ A_c3 = generate_conv2d(inputconv=A_c2,o_d=ngf*4,kernal_size=ks,stride=2,padding="SAME",
+ name="A_c3",stddev=0.02) # 1*64*64*128
+ B_c3 = generate_conv2d(inputconv=B_c2,o_d=ngf*4,kernal_size=ks,stride=2,padding="SAME",
+ name="B_c3",stddev=0.02) #1*64*64*128
+
+ input_res = tf.concat([A_c3,B_c3],axis=-1,name="concat") #1*64*64*256
+ o_r1 = generate_resblock(input_res,dim=ngf*8,name="r1")
+ o_r2 = generate_resblock(o_r1,dim=ngf*8,name="r2")
+ o_r3 = generate_resblock(o_r2,dim=ngf*8,name="r3")
+ o_r4 = generate_resblock(o_r3,dim=ngf*8,name="r4")
+ o_r5 = generate_resblock(o_r4,dim=ngf*8,name="r5")
+ o_r6 = generate_resblock(o_r5,dim=ngf*8,name="r6")
+ o_r7 = generate_resblock(o_r6,dim=ngf*8,name="r7")
+ o_r8 = generate_resblock(o_r7,dim=ngf*8,name="r8")
+ o_r9 = generate_resblock(o_r8,dim=ngf*8,name="r9")
+
+ o_r9_A = tf.slice(o_r9,[0,0,0,0],[1,64,64,128])
+ o_r9_B = tf.slice(o_r9,[0,0,0,128],[1,64,64,128])
+
+ A_c4 = generate_deconv2d(inputdeconv=o_r9_A,o_d=ngf*2,kernal_size=ks,stride=2,padding="SAME",
+ name="A_c4",stddev=0.02)
+ B_c4 = generate_deconv2d(inputdeconv=o_r9_B,o_d=ngf*2,kernal_size=ks,stride=2,padding="SAME",
+ name="B_c4",stddev=0.02)
+ A_c5 = generate_deconv2d(inputdeconv=A_c4,o_d=ngf,kernal_size=ks,stride=2,padding="SAME",
+ name="A_c5",stddev=0.02)
+ B_c5 = generate_deconv2d(inputdeconv=B_c4,o_d=ngf,kernal_size=ks,stride=2,padding="SAME",
+ name="B_c5",stddev=0.02)
+ A_c6 = generate_deconv2d(inputdeconv=A_c5,o_d=3,kernal_size=fs,stride=1,padding="SAME",
+ name="A_c6",stddev=0.02,do_relu=False)
+ B_c6 = generate_deconv2d(inputdeconv=B_c5,o_d=3,kernal_size=fs,stride=1,padding="SAME",
+ name="B_c6",stddev=0.02,do_relu=False)
+
+ out_gen_A = tf.nn.tanh(A_c6,name="A_t")
+ out_gen_B = tf.nn.tanh(B_c6,name="B_t")
+
+ return out_gen_A,out_gen_B
+
+
+def generate_discriminator(inputdis,name="discriminator"):
+ """
+ :param inputdis: 1*256*256*3
+ :param name:
+ :return:
+ """
+ with tf.variable_scope(name):
+ f = 4
+        # spectral normalization
+ oc_1 = generate_conv2d(inputdis,64,f,2,"SAME",name="c1",do_norm=False,relufactor=0.2) # 1*128*128*64
+ oc_2 = generate_conv2d(oc_1,128,f,2,"SAME",name="c2",do_norm=False,do_sp_norm=True,relufactor=0.2) # 1*64*64*128
+ oc_3 = generate_conv2d(oc_2,256,f,2,"SAME",name="c3",do_norm=False,do_sp_norm=True,relufactor=0.2) # 1*32*32*256
+ oc_4 = generate_conv2d(oc_3,512,f,1,"SAME",name="c4",do_norm=False,do_sp_norm=True,relufactor=0.2) # 1*32*32*512
+ oc_5 = generate_conv2d(oc_4,1,f,1,"SAME",name="c5",do_norm=False,do_sp_norm=False,do_relu=False) # 1*32*32*1
+ return oc_5
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/modelzoo_level.txt b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/modelzoo_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..461321acac36505b340c908c09e10a07cc184c0a
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/modelzoo_level.txt
@@ -0,0 +1,16 @@
+----- Training only -----
+
+GPUStatus:OK
+NPUMigrationStatus:OK
+
+----- Inference only -----
+
+ModelConvert:OK
+QuantStatus:POK
+
+----- Common -----
+
+FuncStatus:OK
+PrecisionStatus:POK
+AutoTune:POK
+PerfStatus:NOK
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/GPu.jpg b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/GPu.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7d249318379d824d10bde9f37eaa3a146e4eba8e
Binary files /dev/null and b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/GPu.jpg differ
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/npu.jpg b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/npu.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f32b477e24d242ce9aeb4817c8acbeba51458e07
Binary files /dev/null and b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/pic/npu.jpg differ
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/preTrainedModel/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/preTrainedModel/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/requirements.txt b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..98e967bf7d625c7d883706fddc6957bd11dcbd47
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/requirements.txt
@@ -0,0 +1,9 @@
+# Environment (install via conda, not pip): Python 3.7.5, cuDNN 7.6.5, CUDA Toolkit 10.0.130
+dlib==19.22.0
+numpy==1.21.2
+face-recognition==1.3.0
+imageio==2.9.0
+imutils==0.5.4
+opencv-python  # main.py imports cv2
+tensorflow-gpu==1.15.0
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/.keep b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/train_full_1p.sh b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/train_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5cc84885f8f25a88835bdf5ac674e0ad71d74d2c
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/train_full_1p.sh
@@ -0,0 +1,166 @@
+#!/bin/bash
+
+# Current path; no modification needed
+cur_path=`pwd`/../
+
+# Collective communication parameters; no modification needed
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+
+# Dataset path; keep empty, no modification needed
+data_path=''
+# Pretrained checkpoint path
+ckpt_path=''
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="PairedCycleGAN_ID1281_for_Tensorflow"
+# Number of training epochs
+train_epochs=180
+# Training batch size
+batch_size=1
+# Training steps
+train_steps=800
+# Learning rate
+learning_rate=0.0002
+
+# TF2.X only; review and modify per model
+export NPU_LOOP_SIZE=${train_steps}
+
+# Diagnostic parameters; precision_mode needs review per model
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+
+
+# Help message; no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# Parameter validation; no modification needed
+for para in $*
+do
+ if [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/test/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/test/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/test/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+
+ elif [[ $para == --ckpt_path* ]];then
+ ckpt_path=`echo ${para#*=}`
+ fi
+done
+
+# Check that data_path was passed in; no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+
+fi
+
+# Training start time; no modification needed
+start_time=$(date +%s)
+
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no modification needed
+ echo "Device ID: $ASCEND_DEVICE_ID"
+ export RANK_ID=$RANK_ID
+
+
+
+    # Create the DeviceID output directory; no modification needed
+ if [ -d ${cur_path}/test/output/${ASCEND_DEVICE_ID} ];then
+ rm -rf ${cur_path}/test/output/${ASCEND_DEVICE_ID}
+ mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+ else
+ mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+ fi
+
+
+
+
+    # Core binding: delete for models that do not need it; review and modify for those that do
+ let a=RANK_ID*12
+ let b=RANK_ID+1
+ let c=b*12-1
+
+    # Run the training script. The parameters below need no modification; others should be reviewed per model.
+
+    python3 main.py > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
+
+
+done
+wait
+
+# Training end time; no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+TrainingTime=`grep "Perf" $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log |awk 'END {print $7}'`
+
+
+# Performance monitoring summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${TrainingTime}'}'`
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep 'G_loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $7}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information to ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = None" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/train_performance_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c5de21f1361bed0ddf38e40a28d977c9f2daccbb
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/test/train_performance_1p.sh
@@ -0,0 +1,160 @@
+#!/bin/bash
+
+# Current path; no modification needed
+cur_path=`pwd`/../
+
+# Collective communication parameters; no modification needed
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# Dataset path; keep empty, no modification needed
+data_path=''
+# Pretrained checkpoint path
+ckpt_path=''
+
+# Default log level; no modification needed
+#export ASCEND_GLOBAL_LOG_LEVEL=3
+#export ASCEND_DEVICE_ID=4
+
+# Basic parameters; review and modify per model
+# Network name, same as the directory name
+Network="PairedCycleGAN_ID1281_for_Tensorflow"
+# Number of training epochs
+epochs=2
+# Training batch size
+batch_size=32
+
+# Diagnostic parameters; precision_mode needs review per model
+precision_mode="allow_mix_precision"
+# Maintenance parameters; no modification needed below
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+
+# Help message; no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:./train_performance_1P.sh "
+ echo " "
+ echo "parameter explain:
+ --precision_mode precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+ --over_dump if or not over detection, default is False
+ --data_dump_flag data dump flag, default is False
+ --data_dump_step data dump step, default is 10
+ --profiling if or not profiling for performance debug, default is False
+ --data_path source data of training
+ --ckpt_path model
+ -h/--help show help message
+ "
+ exit 1
+fi
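+# Example invocation (paths are illustrative):
+#   bash test/train_performance_1p.sh --data_path=/home/dataset --ckpt_path=/home/ckpt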
+
+# Parameter validation, do not modify
+for para in $*
+do
+ if [[ $para == --precision_mode* ]];then
+ precision_mode=`echo ${para#*=}`
+ elif [[ $para == --over_dump* ]];then
+ over_dump=`echo ${para#*=}`
+ over_dump_path=${cur_path}/test/output/overflow_dump
+ mkdir -p ${over_dump_path}
+ elif [[ $para == --data_dump_flag* ]];then
+ data_dump_flag=`echo ${para#*=}`
+ data_dump_path=${cur_path}/test/output/data_dump
+ mkdir -p ${data_dump_path}
+ elif [[ $para == --data_dump_step* ]];then
+ data_dump_step=`echo ${para#*=}`
+ elif [[ $para == --profiling* ]];then
+ profiling=`echo ${para#*=}`
+ profiling_dump_path=${cur_path}/test/output/profiling
+ mkdir -p ${profiling_dump_path}
+ elif [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --ckpt_path* ]];then
+ ckpt_path=`echo ${para#*=}`
+ fi
+done
+# # Check that data_path was passed in, do not modify
+# if [[ $data_path == "" ]];then
+#     echo "[Error] para \"data_path\" must be configured"
+#     exit 1
+# fi
+
+# Enter the training script directory; requires model-specific review
+cd $cur_path/
+
+# Create the DeviceID output directory, do not modify
+if [ -d ${cur_path}/test/output/${ASCEND_DEVICE_ID} ];then
+ rm -rf ${cur_path}/test/output/${ASCEND_DEVICE_ID}
+ mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+else
+ mkdir -p ${cur_path}/test/output/$ASCEND_DEVICE_ID/ckpt
+fi
+
+#------------------------------------------------------------------------------------------------------------------------
+# Training start time, do not modify (required by the e2e_time computation below)
+start_time=$(date +%s)
+
+# Run the training script; the arguments below do not need modification, anything else requires model-specific review
+python3 main.py \
+    --data_dir=${data_path}/data \
+    --epoch=${epochs} \
+    --batch_size=${batch_size} \
+    --train=True \
+    --sample_freq=3 \
+    --ckpt_freq=3 > ${cur_path}/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1
+wait
+#------------------------------------------------------------------------------------------------------------------------
+
+# Training end time, do not modify
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+# Print results, do not modify
+echo "------------------ Final result ------------------"
+# Output performance (FPS); requires model-specific review
+#=============================================================================================================================
+TrainingTime=`grep 'time:' $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $10}'`
+FPS=`grep 'fps:' $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $12}'`
+#=============================================================================================================================
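+# Note: the awk field positions above ($10 for 'time:', $12 for 'fps:') assume the log line format printed by main.py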
+
+# Print, do not modify
+echo "Final Performance TrainingTime : $TrainingTime"
+echo "Final Performance images/sec : $FPS"
+#=============================================================================================================================
+# Output training accuracy; requires model-specific review
+train_accuracy=`grep "Mean-" $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk 'END {print $3}'`
+#=============================================================================================================================
+# Print, do not modify
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Performance monitoring summary
+# Training case information, do not modify
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'perf'
+
+## Collect performance data, do not modify
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+#TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${FPS}'/69}'`
+
+#=============================================================================================================================
+# Extract loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+grep 'G_loss' $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|awk '{print $7}' > $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+#=============================================================================================================================
+
+# Loss value of the last iteration, do not modify
+ActualLoss=`awk 'END {print}' $cur_path/test/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key information to ${CaseName}.log, do not modify
+echo "Network = ${Network}" > $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = None" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/utils.py b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..74a5b2fc10480723da902cfdbe2e79d794c1260e
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/utils.py
@@ -0,0 +1,99 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import skimage
+import skimage.io
+import skimage.transform
+import numpy as np
+
+
+# synset = [l.strip() for l in open('synset.txt').readlines()]
+
+
+# returns image of shape [224, 224, 3]
+# [height, width, depth]
+def load_image(path):
+ # load image
+ img = skimage.io.imread(path)
+ img = img / 255.0
+ assert (0 <= img).all() and (img <= 1.0).all()
+ # print "Original Image Shape: ", img.shape
+ # we crop image from center
+ short_edge = min(img.shape[:2])
+ yy = int((img.shape[0] - short_edge) / 2)
+ xx = int((img.shape[1] - short_edge) / 2)
+ crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
+ # resize to 224, 224
+ resized_img = skimage.transform.resize(crop_img, (224, 224))
+ return resized_img
+
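+# Example usage (path is illustrative):
+#   img = load_image("./test_data/starry_night.jpg")  # float array in [0, 1], shape (224, 224, 3)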
+
+# returns the top1 string
+def print_prob(prob, file_path):
+    with open(file_path) as f:
+        synset = [l.strip() for l in f.readlines()]
+
+ # print prob
+ pred = np.argsort(prob)[::-1]
+
+ # Get top1 label
+ top1 = synset[pred[0]]
+ print(("Top1: ", top1, prob[pred[0]]))
+ # Get top5 label
+ top5 = [(synset[pred[i]], prob[pred[i]]) for i in range(5)]
+ print(("Top5: ", top5))
+ return top1
+
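+# Example usage (the synset file path is illustrative; see the commented-out loader above):
+#   top1_label = print_prob(probabilities, "./synset.txt")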
+
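+# Resize an image to (height, width); when only one dimension is given,
+# the other is scaled to preserve the aspect ratio.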
+def load_image2(path, height=None, width=None):
+ # load image
+ img = skimage.io.imread(path)
+ img = img / 255.0
+ if height is not None and width is not None:
+ ny = height
+ nx = width
+    elif height is not None:
+        ny = height
+        # int() is needed under Python 3, where / always returns a float
+        nx = int(img.shape[1] * ny / img.shape[0])
+    elif width is not None:
+        nx = width
+        ny = int(img.shape[0] * nx / img.shape[1])
+ else:
+ ny = img.shape[0]
+ nx = img.shape[1]
+ return skimage.transform.resize(img, (ny, nx))
+
+
+def test():
+ img = skimage.io.imread("./test_data/starry_night.jpg")
+ ny = 300
+    nx = int(img.shape[1] * ny / img.shape[0])  # cast to int for Python 3 division
+ img = skimage.transform.resize(img, (ny, nx))
+ skimage.io.imsave("./test_data/test/output.jpg", img)
+
+
+if __name__ == "__main__":
+ test()
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/vgg16.py b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/vgg16.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd0b64865c903752d78ad327550be88d9620ac19
--- /dev/null
+++ b/TensorFlow/contrib/cv/pairedcyclegan/PairedCycleGAN_ID1281_for_Tensorflow/vgg16.py
@@ -0,0 +1,176 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import inspect
+import os
+
+import numpy as np
+import tensorflow as tf
+import time
+
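+# Per-channel ImageNet means in BGR order (blue, green, red); build() subtracts these from the input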
+VGG_MEAN = [103.939, 116.779, 123.68]
+
+
+class Vgg16:
+ def __init__(self, vgg16_npy_path=None):
+        # Layer activations are populated by build(); initialize as placeholders
+        self.conv1_1 = None
+        self.conv1_2 = None
+        self.pool1 = None
+        self.conv2_1 = None
+        self.conv2_2 = None
+        self.pool2 = None
+        self.conv3_1 = None
+        self.conv3_2 = None
+        self.conv3_3 = None
+        self.pool3 = None
+        self.conv4_1 = None
+        self.conv4_2 = None
+        self.conv4_3 = None
+        self.pool4 = None
+        self.conv5_1 = None
+        self.conv5_2 = None
+        self.conv5_3 = None
+        self.pool5 = None
+        self.fc6 = None
+        self.relu6 = None
+        self.fc7 = None
+        self.relu7 = None
+        self.fc8 = None
+        self.prob = None
+ if vgg16_npy_path is None:
+ path = inspect.getfile(Vgg16)
+ path = os.path.abspath(os.path.join(path, os.pardir))
+ path = os.path.join(path, "vgg16.npy")
+ vgg16_npy_path = path
+ print(path)
+
+        # allow_pickle=True is required for NumPy >= 1.16.3 to load the pickled weight dictionary
+        self.data_dict = np.load(vgg16_npy_path, allow_pickle=True, encoding='latin1').item()
+ print("npy file loaded")
+
+ def build(self, rgb):
+ """
+ load variable from npy to build the VGG
+ :param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
+ """
+
+ start_time = time.time()
+ print("build model started")
+ rgb_scaled = rgb * 255.0
+
+ # Convert RGB to BGR
+ red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)
+ assert red.get_shape().as_list()[1:] == [224, 224, 1]
+ assert green.get_shape().as_list()[1:] == [224, 224, 1]
+ assert blue.get_shape().as_list()[1:] == [224, 224, 1]
+ bgr = tf.concat(axis=3, values=[
+ blue - VGG_MEAN[0],
+ green - VGG_MEAN[1],
+ red - VGG_MEAN[2],
+ ])
+ assert bgr.get_shape().as_list()[1:] == [224, 224, 3]
+
+ self.conv1_1 = self.conv_layer(bgr, "conv1_1")
+ self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
+ self.pool1 = self.max_pool(self.conv1_2, 'pool1')
+
+ self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
+ self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
+ self.pool2 = self.max_pool(self.conv2_2, 'pool2')
+
+ self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
+ self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
+ self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
+ self.pool3 = self.max_pool(self.conv3_3, 'pool3')
+
+ self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
+ self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
+ self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
+ self.pool4 = self.max_pool(self.conv4_3, 'pool4')
+
+ self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
+ self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
+ self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
+ self.pool5 = self.max_pool(self.conv5_3, 'pool5')
+
+ self.fc6 = self.fc_layer(self.pool5, "fc6")
+ assert self.fc6.get_shape().as_list()[1:] == [4096]
+ self.relu6 = tf.nn.relu(self.fc6)
+
+ self.fc7 = self.fc_layer(self.relu6, "fc7")
+ self.relu7 = tf.nn.relu(self.fc7)
+
+ self.fc8 = self.fc_layer(self.relu7, "fc8")
+
+ self.prob = tf.nn.softmax(self.fc8, name="prob")
+
+ self.data_dict = None
+ print(("build model finished: %ds" % (time.time() - start_time)))
+
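+    # Minimal usage sketch (assumes a vgg16.npy weight file next to this module):
+    #   images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # RGB scaled to [0, 1]
+    #   vgg = Vgg16()
+    #   vgg.build(images)
+    #   # vgg.prob then holds the softmax class probabilities
+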
+ def avg_pool(self, bottom, name):
+ return tf.nn.avg_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
+
+ def max_pool(self, bottom, name):
+ return tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)
+
+ def conv_layer(self, bottom, name):
+ with tf.variable_scope(name):
+ filt = self.get_conv_filter(name)
+
+ conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')
+
+ conv_biases = self.get_bias(name)
+ bias = tf.nn.bias_add(conv, conv_biases)
+
+ relu = tf.nn.relu(bias)
+ return relu
+
+ def fc_layer(self, bottom, name):
+ with tf.variable_scope(name):
+ shape = bottom.get_shape().as_list()
+ dim = 1
+ for d in shape[1:]:
+ dim *= d
+ x = tf.reshape(bottom, [-1, dim])
+
+ weights = self.get_fc_weight(name)
+ biases = self.get_bias(name)
+
+ # Fully connected layer. Note that the '+' operation automatically
+ # broadcasts the biases.
+ fc = tf.nn.bias_add(tf.matmul(x, weights), biases)
+
+ return fc
+
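+    # The vgg16.npy weight dict maps each layer name to a [weights, biases] pair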
+ def get_conv_filter(self, name):
+ return tf.constant(self.data_dict[name][0], name="filter")
+
+ def get_bias(self, name):
+ return tf.constant(self.data_dict[name][1], name="biases")
+
+ def get_fc_weight(self, name):
+ return tf.constant(self.data_dict[name][0], name="weights")
\ No newline at end of file