From 30847598411c59d430083748327d218e4392717e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 03:11:14 +0000
Subject: [PATCH 01/26] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20SRFBN=5Ffor=5FTensor?=
=?UTF-8?q?Flow?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/.keep
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/.keep b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/.keep
new file mode 100644
index 000000000..e69de29bb
--
Gitee
From 95bf83e17801b71ed53d439e043cf4258adabc95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 03:11:52 +0000
Subject: [PATCH 02/26] =?UTF-8?q?=E6=8F=90=E4=BA=A4=E6=96=87=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/Basic_Model.py | 144 +++++
.../contrib/cv/SRFBN_for_TensorFlow/LICENSE | 284 ++++++++++
.../cv/SRFBN_for_TensorFlow/PreProcess.py | 70 +++
.../contrib/cv/SRFBN_for_TensorFlow/README.md | 118 ++++
.../cv/SRFBN_for_TensorFlow/SRFBN_model.py | 181 +++++++
.../contrib/cv/SRFBN_for_TensorFlow/config.py | 53 ++
.../cv/SRFBN_for_TensorFlow/psnr_ssim.py | 86 +++
.../cv/SRFBN_for_TensorFlow/requirements.txt | 4 +
.../contrib/cv/SRFBN_for_TensorFlow/test.py | 61 +++
.../SRFBN_for_TensorFlow/traditional_blur.py | 508 ++++++++++++++++++
.../contrib/cv/SRFBN_for_TensorFlow/train.py | 124 +++++
11 files changed, 1633 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/LICENSE
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/requirements.txt
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
new file mode 100644
index 000000000..e902cf5b6
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
@@ -0,0 +1,144 @@
+from npu_bridge.npu_init import *
+import tensorflow as tf
+from tensorflow.contrib import layers
+import sys
+
+class basic_network(object):
+    # initialize the network configuration
+    def __init__(self, cfg):
+        self.training = True
+        self.cfg = cfg
+        self.params_count = 0  # number of parameters
+    # initialize the parameters (weights and biases)
+    def init_params(self, *args, **kwargs):
+        def _variable_on_cpu(w_shape, b_shape, weight_decay=0.99, use_bias=True, name="conv"):
+            with tf.device('/cpu:0'):  # place the variables on the CPU
+                w = tf.Variable(tf.truncated_normal(w_shape, 0.0, 0.001), trainable=True, name="%s_w" % name)  # weights
+                tf.add_to_collection(name="weights_l2_loss", value=self.calc_l1_loss(w, weight_decay))  # add the regularizer to the "weights_l2_loss" collection (note: despite the name, calc_l1_loss adds an L1 penalty)
+                b = tf.Variable(tf.zeros(b_shape), trainable=use_bias, name="%s_b" % name)  # bias
+            return w, b  # return the parameters w and b
+        kernel_size = kwargs["kernel_size"]  # kwargs is a dict
+        in_channels = kwargs["in_channels"]
+        out_channels = kwargs["out_channels"]
+        # weight_decay = kwargs["weight_decay"]
+        w_shape = [kernel_size, kernel_size, in_channels, out_channels]  # shape of the weights
+        b_shape = [out_channels]  # shape of the bias
+        name = kwargs["name"]
+        self.params_count += kernel_size*kernel_size*in_channels*out_channels
+        self.params_count += out_channels  # running parameter count
+        return _variable_on_cpu(w_shape, b_shape, use_bias=kwargs["use_bias"], name=name)  # return the initialized w and b
+    # compute the reconstruction loss (L1 or L2)
+ def calc_loss(self, *args, **kwargs):
+ loss_type = kwargs["loss_type"]
+ x = kwargs["x"]
+ y = kwargs["y"]
+        if loss_type == "L1":  # loss type
+ return tf.reduce_sum(tf.abs(x-y), name="L1_loss")
+ elif loss_type == "L2":
+ return tf.nn.l2_loss((x-y), name="L2_loss")
+    # activation function
+ def activation(self, *args, **kwargs):
+ act_type = kwargs["act_type"]
+ act_type = act_type.lower()
+ if act_type == "relu":
+ return tf.nn.relu(args[0])
+ elif act_type == "lrelu":
+ slope = kwargs["slope"]
+ y = slope*args[0]
+ return tf.maximum(args[0], y)
+        elif act_type == "prelu":
+            return tf.nn.leaky_relu(args[0], alpha=0.2)  # approximated by a fixed-slope leaky ReLU
+ elif act_type == "tanh":
+ return tf.nn.tanh(args[0])
+ else:
+ return args[0]
+    # L2 weight-decay term
+ def calc_l2_loss(self, weight, weight_decay):
+        _, _, _, outchannel = weight.get_shape().as_list()  # only the output channel count is needed
+ return (weight_decay) * tf.reduce_sum(tf.square(weight)) / outchannel
+    # L1 weight-decay term
+ def calc_l1_loss(self, weight, weight_decay):
+ _, _, _, outchannel = weight.get_shape().as_list()
+ return (weight_decay)*tf.reduce_sum(tf.abs(weight)) / outchannel
+    # batch normalization
+    def batch_norm(self, *args, **kwargs):
+        return tf.layers.batch_normalization(args[0], training=kwargs["training"])  # the first positional argument is the input
+    # instance normalization
+    def instance_norm(self, *args, **kwargs):
+        return layers.instance_norm(args[0], scope=kwargs["name"])
+    # hard sigmoid activation
+    def hard_sigmoid(self, x):
+        return tf.nn.relu6(x + 3) / 6
+
+ def hard_swish(self, x):
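+        # hard_swish(x) = x * hard_sigmoid(x), as used in MobileNetV3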
+ return x * self.hard_sigmoid(x)
+    # global average pooling
+    def global_average_pooling(self, x, name="GAP"):
+        return tf.reduce_mean(x, axis=[1, 2], keepdims=True, name="Global_Average_Pooling_%s" % name)  # keep the spatial dims
+
+    # convolution block; mode "CNA" = Conv -> Norm -> Act, "NAC" = Norm -> Act -> Conv
+    def ConvBlock(self, x, in_channels, out_channels, kernel_size, stride=1, name="ConvBlock",
+                  BN=True, use_bias=True, padding="VALID", act_type="relu", mode="CNA"):
+        assert (mode in ['CNA', 'NAC']), '[ERROR] Wrong mode in [%s]!' % sys.modules[__name__]  # validate the mode
+ weight, bias = self.init_params(kernel_size=kernel_size, in_channels=in_channels,
+ out_channels=out_channels, use_bias=use_bias, name=name)
+        if mode == "CNA":  # convolve, then normalize, then activate
+ x = tf.nn.conv2d(x, filter=weight, strides=[1, stride, stride, 1], padding=padding)
+ x = tf.nn.bias_add(x, bias)
+ if BN:
+ if self.cfg.BN_type == "BN":
+ x = self.batch_norm(x, training=self.cfg.istrain)
+ elif self.cfg.BN_type == "IN":
+ x = self.instance_norm(x, name="%s_IN"%name)
+ else:
+ raise NotImplementedError('[ERROR] BN type [%s] is not implemented!' % self.cfg.BN_type)
+ x = self.activation(x, act_type=act_type)
+ return x
+ elif mode=="NAC":
+ if BN:
+ if self.cfg.BN_type == "BN":
+ x = self.batch_norm(x, training=self.cfg.istrain)
+ elif self.cfg.BN_type == "IN":
+ x = self.instance_norm(x, name="%s_IN" % name)
+ else:
+ raise NotImplementedError('[ERROR] BN type [%s] is not implemented!' % self.cfg.BN_type)
+ x = self.activation(x, act_type=act_type)
+ x = tf.nn.conv2d(x, filter=weight, strides=[1, stride, stride, 1], padding=padding)
+ x = tf.nn.bias_add(x, bias)
+ return x
+    # transposed-convolution block (upsampling)
+ def DeConvBlock(self, x, in_channels, out_channels, kernel_size, stride=1, name="DeConvBlock",
+ BN=True, use_bias=True, padding="VALID", act_type="relu", mode="CNA"):
+ assert (mode in ['CNA', 'NAC']), '[ERROR] Wrong mode in [%s]!' % sys.modules[__name__]
+        b, h, w, c = x.get_shape().as_list()  # b: batch size, h: height, w: width, c: channels
+ out_shape = [b, h * self.cfg.scale, w * self.cfg.scale, out_channels]
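+        # note: the output size is derived from cfg.scale, so callers are expected to pass stride == cfg.scale;
+        # in_channels/out_channels are swapped in init_params below because conv2d_transpose filters have shape [k, k, out_channels, in_channels]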
+ weight, bias = self.init_params(kernel_size=kernel_size, in_channels=out_channels,
+ out_channels=in_channels, use_bias=use_bias, name=name)
+        if mode == "CNA":  # transposed convolution (enlarges the spatial size)
+ x = tf.nn.conv2d_transpose(x, filter=weight, output_shape=out_shape,
+ strides=[1, stride, stride, 1], padding=padding)
+ x = tf.nn.bias_add(x, bias)
+ if BN:
+ if self.cfg.BN_type == "BN":
+                    x = self.batch_norm(x, training=self.cfg.istrain)
+ elif self.cfg.BN_type == "IN":
+ x = self.instance_norm(x, name="%s_IN" % name)
+ else:
+ raise NotImplementedError('[ERROR] BN type [%s] is not implemented!' % self.cfg.BN_type)
+ x = self.activation(x, act_type=act_type)
+ return x
+ elif mode == "NAC":
+ if BN:
+ if self.cfg.BN_type == "BN":
+                    x = self.batch_norm(x, training=self.cfg.istrain)
+ elif self.cfg.BN_type == "IN":
+ x = self.instance_norm(x, name="%s_IN" % name)
+ else:
+ raise NotImplementedError('[ERROR] BN type [%s] is not implemented!' % self.cfg.BN_type)
+ x = self.activation(x, act_type=act_type)
+ x = tf.nn.conv2d_transpose(x, filter=weight, output_shape=out_shape,
+ strides=[1, stride, stride, 1], padding=padding)
+ x = tf.nn.bias_add(x, bias)
+ return x
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/LICENSE b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/LICENSE
new file mode 100644
index 000000000..5ea8a5f7b
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/LICENSE
@@ -0,0 +1,284 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+------------------
+Files: third_party/compute_library/...
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+------------------
+Files: ACKNOWLEDGEMENTS
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------
+Files: third_party/hexagon
+
+Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted (subject to the limitations in the
+disclaimer below) provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of The Linux Foundation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
+GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
+HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
new file mode 100644
index 000000000..c38999b3c
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
@@ -0,0 +1,70 @@
+from npu_bridge.npu_init import *
+import cv2
+import numpy as np
+import random
+#from skimage import util
+
+# add noise to the image data (currently disabled)
+#def add_noise(img):
+    # mode_types = ['gaussian', 'localvar', 'poisson', 'speckle']  # 'salt', 'pepper' and 's&p' look too artificial
+    # inx = int(np.random.choice(np.arange(len(mode_types)), 1))
+    # # inx = 0
+    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert the color space to RGB
+    # mean = random.random() * 0.001  # + 0.001; random.random() yields a number in [0, 1)
+    # var = random.random() * 0.002  # + 0.01
+    # noise_img = util.random_noise(img.copy(), mode=mode_types[inx],
+    #                               mean=mean,
+    #                               var=var)  # add the noise
+    # return noise_img
+
+# data augmentation
+def augment_data(img_patch, flip, rot):  # img_patch: h,w,c
+    if flip==1:
+        img_patch = img_patch[:, ::-1, :]  # horizontal flip
+    elif flip==2:
+        img_patch = img_patch[::-1, :, :]  # vertical flip
+    if rot==1:
+        img_patch = cv2.rotate(img_patch, cv2.ROTATE_90_CLOCKWISE)  # rotate 90 degrees clockwise
+    elif rot==2:
+        img_patch = cv2.rotate(img_patch, cv2.ROTATE_90_COUNTERCLOCKWISE)  # rotate 90 degrees counterclockwise
+    return img_patch
+# preprocessing: crop aligned HR/LR patch pairs
+def preprocess(imgs, cfg):
+    LR_patchs, HR_patchs = [], []
+    for img in imgs:
+
+        HR = cv2.imread(img.strip(), cv2.IMREAD_COLOR)  # read the image from its path (BGR color order)
+        HR = (HR - 127.5) / 128  # normalize to roughly [-1, 1]
+        h, w, c = HR.shape  # height, width, channels
+
+ x_stride = w // (cfg.imagesize * cfg.scale)
+ y_stride = h // (cfg.imagesize * cfg.scale)
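+        # e.g. with imagesize=64 and scale=3, each HR patch is 192x192 and its matching LR patch 64x64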
+
+ for x in range(x_stride):
+ for y in range(y_stride):
+ HR_patch = HR[y * cfg.imagesize * cfg.scale:(y + 1) * cfg.imagesize * cfg.scale,
+ x * cfg.imagesize * cfg.scale:(x + 1) * cfg.imagesize * cfg.scale, :]
+ # add noise && add blur
+ t = np.random.randint(0, 2, 1)
+ if t == 0:
+ LR_patch = cv2.resize(HR_patch, dsize=None, fx=1 / cfg.scale, fy=1 / cfg.scale,
+ interpolation=cv2.INTER_CUBIC)
+ LR_patch = np.clip(LR_patch, -1.0, 1.0)
+ #LR_patch = add_noise(LR_patch)
+ else:
+ #LR_patch = add_noise(HR_patch) # [-1, 1]
+ LR_patch = cv2.resize(HR_patch, dsize=None, fx=1 / cfg.scale,
+ fy=1 / cfg.scale, interpolation=cv2.INTER_LINEAR)
+ # data augment
+ if cfg.istrain:
+ rot = np.random.randint(0, 3, 1)
+ flip = np.random.randint(0, 3, 1)
+ LR_patch = augment_data(LR_patch, flip, rot)
+ HR_patch = augment_data(HR_patch, flip, rot)
+ LR_patchs.append(LR_patch)
+ HR_patchs.append(HR_patch)
+
+ return HR_patchs, LR_patchs
+
+
+
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
new file mode 100644
index 000000000..79c405514
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
@@ -0,0 +1,118 @@
+- [Basic Information](#基本信息.md)
+- [Overview](#概述.md)
+- [Training Environment Setup](#训练环境准备.md)
+- [Quick Start](#快速上手.md)
+- [Training Results](#训练结果.md)
+- [Advanced Reference](#高级参考.md)
+
+# Basic Information
+
+**Publisher: Huawei**
+
+**Application Domain: Computer Vision**
+
+**Modified: 2022.11.6**
+
+**Framework: TensorFlow 1.15.0**
+
+**Description: Training code, based on the TensorFlow framework, for reconstructing the corresponding super-resolution images from high-definition images**
+
+# Overview
+
+```
+SRFBN is a network model that uses feedback connections to improve the quality of reconstructed super-resolution images
+```
+- Reference paper:
+
+ https://arxiv.org/abs/1903.09814v2
+
+- Reference implementation:
+
+ https://github.com/turboLIU/SRFBN-tensorflow/blob/master/train.py
+
+## Default Configuration
+
+- Training data preprocessing:
+
+  - Input image size: 64*64
+- Test data preprocessing:
+
+  - Input image size: 64*64
+- Training hyperparameters (see the sketch below):
+
+ - Batch size: 1
+ - Train epoch: 1000
+
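+These defaults correspond to fields in config.py; a minimal sketch of overriding them in Python (field names taken from this repository's config.py):
+
+```
+from config import SRFBN_config
+
+cfg = SRFBN_config()
+cfg.batchsize = 1    # batch size
+cfg.imagesize = 64   # LR patch size (64*64)
+cfg.epoch = 1000     # number of training epochs
+```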
+
+# Quick Start
+
+- Dataset preparation
+1. Model training uses the DIV2K dataset.
+
+## Model Training
+
+- Single-device training
+
+  1. Configure the training parameters.
+
+     First configure batch_size, epochs, data_path, etc. in the script test/train_performance_1p.sh. Set data_path to the actual dataset path, or pass it as a command-line argument when launching training.
+
+ ```
+ batch_size=1
+ epochs=1000
+ data_path="../DIV2K/DIV2K_train_HR"
+ ```
+
+  2. Start training.
+
+     Launch single-device training (script: SRFBN_for_TensorFlow/test/train_performance_1p.sh):
+
+ ```
+ bash train_performance_1p.sh --data_path=../DIV2K/DIV2K_train_HR
+ ```
+
+# Training Results
+
+- Accuracy comparison
+
+| Accuracy metric | GPU (measured) | NPU (measured) |
+| --------------- | -------------- | -------------- |
+| PSNR            | 6.706763287    | 5.831956861    |
+
+- Performance comparison
+
+| Performance metric | GPU (measured) | NPU (measured) |
+| ------------------ | -------------- | -------------- |
+| FPS                | 3.358950029841 | 4.976489075014 |
+
+
+# Advanced Reference
+
+## Scripts and Sample Code
+
+```
+├── Basic_Model.py              //basic network building blocks
+├── README.md                   //this documentation
+├── config.py                   //model configuration
+├── PreProcess.py               //data preprocessing
+├── psnr_ssim.py                //image quality metrics (PSNR/SSIM)
+├── requirements.txt            //Python dependencies for training
+├── SRFBN_model.py              //SRFBN network model
+├── test.py                     //test code
+├── traditional_blur.py         //image blurring utilities
+├── train.py                    //training code
+├── test
+│    ├──train_performance_1p.sh //launch script for single-device training/performance
+
+```
+
+## Script Parameters
+
+```
+--data_path          dataset path, default: path/data
+--batch_size         batch size per NPU, default: 1
+--epochs             number of training epochs, default: 1000
+```
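+
+A hypothetical full invocation combining these flags (paths are illustrative):
+
+```
+bash test/train_performance_1p.sh --data_path=../DIV2K/DIV2K_train_HR --batch_size=1 --epochs=1000
+```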
+
+## Training Procedure
+
+1. Launch single-device training with the commands described in "Model Training" above.
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
new file mode 100644
index 000000000..a3e01bac3
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
@@ -0,0 +1,181 @@
+from npu_bridge.npu_init import *
+import tensorflow as tf
+import os
+from Basic_Model import basic_network
+
+
+class SRFBN(basic_network):
+ def __init__(self, sess, cfg):
+ super(SRFBN, self).__init__(cfg)
+ self.sess = sess
+ imageshape = [cfg.batchsize, cfg.imagesize, cfg.imagesize, cfg.c_dim]
+ labelshape = [cfg.batchsize, cfg.imagesize * cfg.scale, cfg.imagesize * cfg.scale, cfg.c_dim]
+ self.imageplaceholder = tf.placeholder(dtype=tf.float32, shape=imageshape, name="image")
+ self.labelplaceholder = tf.placeholder(dtype=tf.float32, shape=labelshape, name="label")
+ self.last_hidden = None
+ self.should_reset = True
+ self.outs = []
+    # feedback block (FB)
+ def FeedBackBlock(self, x, num_features, num_groups, act_type, name="FBB"):
+ if self.cfg.scale == 1:
+ stride = 1
+ padding = "SAME"
+ kernel_size = 5
+ if self.cfg.scale==2:
+ stride = 2
+ padding = "SAME"
+ kernel_size = 6
+ if self.cfg.scale == 3:
+ stride = 3
+ padding = "SAME"
+ kernel_size = 7
+ if self.cfg.scale == 4:
+ stride = 4
+ padding = "SAME"
+ kernel_size = 8
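+        # on the first time step the hidden state is initialized from the input;
+        # later steps concatenate the previous step's output (the feedback connection)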
+ if self.should_reset:
+ self.last_hidden = x
+ self.should_reset = False
+ x = tf.concat([x, self.last_hidden], 3)
+ x = self.ConvBlock(x, 2*num_features, num_features, kernel_size=1, name="FeedBack_compress_in",
+ act_type=act_type)
+
+ lr_features = []
+ hr_features = []
+ lr_features.append(x)
+ for i in range(num_groups):
+ x = tf.concat(lr_features, 3)
+ if i > 0:
+ x = self.ConvBlock(x, num_features*(i+1), num_features, kernel_size=1,stride=1,
+ padding=padding, act_type=act_type, name="%s_%d"%(name, i))
+ x = self.DeConvBlock(x, num_features, num_features, kernel_size=kernel_size, stride=stride,
+ padding=padding, act_type=act_type, name="%s_%d"%(name, i))
+ hr_features.append(x)
+ x = tf.concat(hr_features, 3)
+ if i > 0:
+ x = self.ConvBlock(x, num_features*(i+1), num_features, kernel_size=1, stride=1,
+ padding=padding, act_type=act_type, name="%s_%d"%(name, i))
+ x = self.ConvBlock(x, num_features, num_features, kernel_size=kernel_size, stride=stride,
+ padding=padding, act_type=act_type, name="%s_%d"%(name, i))
+ lr_features.append(x)
+ del hr_features
+
+ x = tf.concat(lr_features[1:], 3)
+
+ x = self.ConvBlock(x, num_features*num_groups, num_features, kernel_size=1,
+ act_type=act_type, name="FeedBack_compress_out")
+
+ self.last_hidden = x
+
+ return x
+
+ def build(self):
+ if self.cfg.scale == 2:
+ stride = 2
+ padding = "SAME"
+ kernel_size = 6
+ if self.cfg.scale == 3:
+ stride = 3
+ padding = "SAME"
+ kernel_size = 7
+ if self.cfg.scale == 4:
+ stride = 4
+ padding = "SAME"
+ kernel_size = 8
+        # x = self.sub_mean(self.imageplaceholder)  # treated as normalization for now
+
+ _, height, width, _ = self.imageplaceholder.get_shape().as_list()
+
+ inter_size = tf.constant([height*self.cfg.scale, width*self.cfg.scale])
+ inter_res = tf.image.resize_images(self.imageplaceholder, inter_size)
+ # inter_res = self.imageplaceholder
+
+ x = self.ConvBlock(self.imageplaceholder, self.cfg.in_channels, 4 * self.cfg.num_features, kernel_size=3,
+ act_type=self.cfg.act_type, padding="SAME", name="conv_in")
+ x = self.ConvBlock(x, 4*self.cfg.num_features, self.cfg.num_features, kernel_size=1,
+ act_type=self.cfg.act_type, padding="SAME", name="feat_in")
+ # outs = []
+ for i in range(self.cfg.num_steps):
+ if i == 0:
+ self.should_reset=True
+ t = self.FeedBackBlock(x, self.cfg.num_features, self.cfg.num_groups, self.cfg.act_type, name="FBB_%d"%i)
+ t = self.DeConvBlock(t, self.cfg.num_features, self.cfg.num_features, kernel_size=kernel_size,
+ stride=stride, padding=padding, act_type="relu", name="out_%d"%i)
+ t = self.ConvBlock(t, self.cfg.num_features, self.cfg.out_channels, kernel_size=3, stride=1,
+ act_type="tanh", padding="SAME", name="conv_out")
+ t = inter_res + t
+ t = tf.clip_by_value(t, -1.0, 1.0)
+ # t = t + inter_res
+ # t = self.add_mean(t)
+ self.outs.append(t)
+    # build the training graph
+ def train_step(self):
+ self.build()
+ print("This Net has Params num is %f MB" % (self.params_count * 4 / 1024 / 1024)) # float32
+ tf.summary.image("image/HR", self.labelplaceholder, max_outputs=1)
+ out = tf.add_n(self.outs)/self.cfg.num_steps
+
+ tf.summary.image("image/SR", out, max_outputs=1)
+ tf.summary.image("image/LR", self.imageplaceholder, max_outputs=1)
+
+ self.l2_regularization_loss = tf.reduce_sum(tf.get_collection("weights_l2_loss"))
+
+ self.losses = [self.calc_loss(x=x, y=self.labelplaceholder, loss_type=self.cfg.loss_type) for x in self.outs]
+ self.losses = tf.reduce_sum(self.losses)/len(self.losses)/self.cfg.batchsize + self.l2_regularization_loss
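+        # i.e. the total loss averages the per-step reconstruction losses per sample and adds the weight regularization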
+
+ tf.summary.scalar('loss/total', self.losses)
+ tf.summary.scalar('loss/l2_loss', self.l2_regularization_loss)
+
+ self.merged_summary = tf.summary.merge_all()
+ self.saver = tf.train.Saver(max_to_keep=1)
+    # load a checkpoint
+ def load(self):
+ model_name = "SRFBN.model"
+ model_dir = "%s_%s_%s_%s_c%d_x%s" % (
+ "SRFBN", self.cfg.num_features, self.cfg.num_steps, self.cfg.num_groups, self.cfg.c_dim, self.cfg.scale)
+ checkpoint_dir = os.path.join(self.cfg.checkpoint_dir, model_dir)
+ ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
+ if ckpt and ckpt.model_checkpoint_path:
+ ckpt_path = str(ckpt.model_checkpoint_path)
+ self.saver.restore(self.sess, os.path.join(os.getcwd(), ckpt_path))
+ step = int(os.path.basename(ckpt_path).split('-')[1])
+ print("\nCheckpoint Loading Success! %s\n" % ckpt_path)
+ else:
+ step = 0
+ print("\nCheckpoint Loading Failed! \n")
+
+ return step
+    # save the current model
+ def save(self, step):
+ model_name = "SRFBN.model"
+ model_dir = "%s_%s_%s_%s_c%d_x%s" % \
+ ("SRFBN", self.cfg.num_features, self.cfg.num_steps,
+ self.cfg.num_groups, self.cfg.c_dim, self.cfg.scale)
+ checkpoint_dir = os.path.join(self.cfg.checkpoint_dir, model_dir)
+
+ if not os.path.exists(checkpoint_dir):
+ os.makedirs(checkpoint_dir)
+
+ self.saver.save(self.sess,
+ os.path.join(checkpoint_dir, model_name),
+ global_step=step)
+    # build the inference graph for testing
+ def test(self, width, height):
+ self.cfg.batchsize = 1
+ testshape = [self.cfg.batchsize, height, width, self.cfg.c_dim]
+ labelshape = [self.cfg.batchsize, height*self.cfg.scale, width*self.cfg.scale, self.cfg.c_dim]
+ self.imageplaceholder = tf.placeholder(dtype=tf.float32, shape=testshape)
+ self.labelplaceholder = tf.placeholder(dtype=tf.float32, shape=labelshape)
+ self.build()
+ # self.outs = [self.add_mean(x) for x in self.outs]
+ out = tf.add_n(self.outs)/self.cfg.num_steps
+ # out = tf.concat(self.outs, -1)
+ return out
+
+
+if __name__ == '__main__':
+ from config import SRFBN_config as config
+ cfg = config()
+ sess = tf.Session(config=npu_config_proto())
+ net = SRFBN(sess, cfg)
+ train_step = net.train_step()
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
new file mode 100644
index 000000000..d6d3b5592
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
@@ -0,0 +1,53 @@
+from npu_bridge.npu_init import *
+import os
+
+class config:
+    def __init__(self):
+        self.batchsize = 1  # number of samples processed per step
+        self.Process_num = 3  # number of worker processes
+        self.maxsize = 200  # maximum queue size
+        self.ngpu = 1  # number of GPUs
+        self.imagesize = 64  # LR patch size
+        self.scale = 3  # upscaling factor
+        self.epoch = 1000  # number of training epochs
+        # create the checkpoint, log, and result directories
+        self.checkpoint_dir = "./model"  # checkpoint directory
+ if not os.path.exists(self.checkpoint_dir):
+ os.mkdir(self.checkpoint_dir)
+ self.log_dir = "./log"
+ if not os.path.exists(self.log_dir):
+ os.mkdir(self.log_dir)
+ self.result = "./result"
+ if not os.path.exists(self.result):
+ os.mkdir(self.result)
+
+
+
+class SRFBN_config(config):
+ def __init__(self):
+ super(SRFBN_config, self).__init__()
+        self.istrain = True  # training mode (vs. test mode)
+        self.istest = not self.istrain
+        self.c_dim = 3  # color channels; supports grayscale as well as RGB training
+        self.in_channels = 3
+        self.out_channels = 3
+        self.num_features = 32  # base number of filters
+        self.num_steps = 4  # number of time steps
+        self.num_groups = 6  # number of projection groups in each feedback block
+        self.BN = True
+        if self.BN:
+            self.BN_type = "BN"  # "BN" or "IN"
+        self.act_type = "prelu"  # activation function
+        self.loss_type = "L2"
+        self.lr_steps = [150, 300, 550, 750]  # epochs at which the learning rate changes
+        self.lr_gama = 1  # learning-rate decay factor
+        self.learning_rate = 2e-7  # learning rate
+        self.load_premodel = True
+        # create the output directories
+ self.srfbn_logdir = "%s/srfbn" % self.log_dir
+ if not os.path.exists(self.srfbn_logdir):
+ os.mkdir(self.srfbn_logdir)
+ self.srfbn_result = "%s/srfbn" % self.result
+ if not os.path.exists(self.srfbn_result):
+ os.mkdir(self.srfbn_result)
+
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py
new file mode 100644
index 000000000..c27924230
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py
@@ -0,0 +1,86 @@
+import numpy as np
+import cv2
+
+# PSNR for grayscale images
+def compare_psnr_gray(im1,im2,maxI=255):
+ im1=im1.astype(np.float64)
+ im2=im2.astype(np.float64)
+ diff=im1-im2
+ mse=np.mean(np.square(diff))
+ if mse==0: return float('inf')
+ return 10*np.log10(maxI*maxI/mse)
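+    # e.g. an MSE of 100 on 8-bit images gives 10*np.log10(255**2/100) ≈ 28.13 dB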
+
+# PSNR for color (BGR) images
+def compare_psnr_rgb(im1,im2,maxI=255):
+ blue1,green1,red1=cv2.split(im1)
+ blue2,green2,red2=cv2.split(im2)
+ psnr_blue=compare_psnr_gray(blue1,blue2,maxI)
+ psnr_green=compare_psnr_gray(green1,green2,maxI)
+ psnr_red=compare_psnr_gray(red1,red2,maxI)
+
+    # average over the three channels
+ return (psnr_blue+psnr_green+psnr_red)/3
+
+# general entry point (a grayscale image cannot be compared with a color one)
+def compare_psnr(im1,im2,maxI=255):
+    # raise an error if the two images differ in shape
+    if im1.shape!=im2.shape: raise ValueError("Error: images have different dimensions")
+    if (im1.ndim==2) and (im2.ndim==2): return compare_psnr_gray(im1,im2)
+    # a 3-D image may still be single-channel
+    elif (im1.ndim==3) and (im2.ndim==3):
+        if im1.shape[2]==3:
+            return compare_psnr_rgb(im1,im2)
+        elif im1.shape[2]==1:
+            return compare_psnr_gray(np.squeeze(im1),np.squeeze(im2))
+    else: raise ValueError("Error: invalid image dimensions")
+
+# SSIM
+def ssim(im1,im2,maxI=255):
+    # 0.01 and 0.03 are the standard SSIM constants; do not change them
+    c1=(0.01*maxI)**2
+    c2=(0.03*maxI)**2
+
+    # convert to float64
+    im1=im1.astype(np.float64)
+    im2=im2.astype(np.float64)
+    # Gaussian kernel; the window size 11 and sigma 1.5 are also fixed by convention
+    kernel=cv2.getGaussianKernel(11,1.5)
+    window=np.outer(kernel,kernel.transpose())
+
+    # convolutions: SSIM gathers local statistics over sliding windows,
+    # one filtering pass per term of the formula
+ mu1=cv2.filter2D(im1,-1,window)[5:-5,5:-5]
+ mu2=cv2.filter2D(im2,-1,window)[5:-5,5:-5]
+ mu1_sq=mu1**2
+ mu2_sq=mu2**2
+ mu1_mu2=mu1*mu2
+ sigma1_sq=cv2.filter2D(im1**2,-1,window)[5:-5,5:-5]-mu1_sq
+ sigma2_sq=cv2.filter2D(im2**2,-1,window)[5:-5,5:-5]-mu2_sq
+ sigma12=cv2.filter2D(im1*im2,-1,window)[5:-5,5:-5]-mu1_mu2
+
+    # the SSIM formula
+ ssim_map=((2*mu1_mu2+c1)*(2*sigma12+c2))/((mu1_sq+mu2_sq+c1)*(sigma1_sq+sigma2_sq+c2))
+    # average over all local windows
+ return ssim_map.mean()
+
+# general entry point
+def compare_ssim(im1,im2,maxI=255):
+    # raise an error if the two images differ in shape
+    if im1.shape!=im2.shape:
+        raise ValueError("Error: images have different dimensions")
+    if im1.ndim==2:
+        return ssim(im1,im2)
+    # a 3-D image may still be single-channel
+    elif im1.ndim==3:
+        if im1.shape[2]==3:
+            blue1,green1,red1=cv2.split(im1)
+            blue2,green2,red2=cv2.split(im2)
+            ssim_blue=ssim(blue1,blue2)
+            ssim_green=ssim(green1,green2)
+            ssim_red=ssim(red1,red2)
+
+            # as with PSNR, average over the channels
+            return (ssim_blue+ssim_green+ssim_red)/3
+        elif im1.shape[2]==1:
+            return ssim(np.squeeze(im1),np.squeeze(im2))
+    else: raise ValueError("Error: invalid image dimensions")
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/requirements.txt b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/requirements.txt
new file mode 100644
index 000000000..e04023578
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/requirements.txt
@@ -0,0 +1,4 @@
+tensorflow==1.15.0
+opencv-python
+numpy
+scikit-image
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
new file mode 100644
index 000000000..57457c50a
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
@@ -0,0 +1,61 @@
+
+from npu_bridge.npu_init import *
+import tensorflow as tf
+
+from SRFBN_model import SRFBN
+from PreProcess import *
+from skimage.metrics import peak_signal_noise_ratio as compare_psnr
+from skimage.metrics import structural_similarity
+
+
+
+
+def test_SRFBN(image_lr,image_hr):
+
+ #image
+ height, width, _ = image_lr.shape
+ print(height,width)
+ global load_flag
+ global srfbn
+ global out
+ if load_flag == 0:
+ srfbn = SRFBN(sess, cfg)
+ out = srfbn.test(width, height)
+ tf.global_variables_initializer().run(session=sess)
+ srfbn.saver = tf.train.Saver(max_to_keep=1)
+ srfbn.load()
+ srfbn.l2_regularization_loss = tf.reduce_sum(tf.get_collection("weights_l2_loss"))
+ srfbn.losses = [srfbn.calc_loss(x=x, y=srfbn.labelplaceholder, loss_type=srfbn.cfg.loss_type) for x in
+ srfbn.outs]
+ srfbn.losses = tf.reduce_sum(srfbn.losses) / len(srfbn.losses) / srfbn.cfg.batchsize + srfbn.l2_regularization_loss
+ load_flag += 1
+ cv2.namedWindow("result", 0)
+
+ img_hr = image_hr.reshape([1,height*srfbn.cfg.scale,width*srfbn.cfg.scale,3])
+ img_lr = image_lr.reshape([1, height, width, 3])
+ output,err,l2_loss = sess.run([out,srfbn.losses,srfbn.l2_regularization_loss], feed_dict={srfbn.imageplaceholder: img_lr,srfbn.labelplaceholder:img_hr})
+    output = output[0] * 128 + 127.5  # denormalize the network output to [0, 255]
+    img_hr = img_hr.reshape([height*srfbn.cfg.scale,width*srfbn.cfg.scale,3]) * 128 + 127.5  # denormalize the label the same way so PSNR/SSIM are computed in the 0-255 range
+    PSNR = compare_psnr(output, img_hr, data_range=255)
+    ssim = structural_similarity(output, img_hr, win_size=11, data_range=255, multichannel=True)
+    print("loss:[%.8f], l2_loss:[%.8f], PSNR:[%.8f], SSIM:[%.8f]"%(err,l2_loss,PSNR,ssim))
+ cv2.imshow("result", np.uint8(output))
+ cv2.waitKey(0)
+
+
+
+if __name__ == '__main__':
+ sess = tf.Session(config=npu_config_proto())
+ from config import SRFBN_config
+ cfg = SRFBN_config()
+ cfg.istest = True
+ cfg.istrain = False
+ image = "/home/TestUser08/BUAA/Resolution_2K/DIV2K/DIV2K_valid_HR/0801.png"
+ batch_label,batch_lrimage = preprocess([image,],cfg)
+ batch_lrimage = np.array(batch_lrimage)
+ batch_label = np.array(batch_label)
+ load_flag = 0
+ for i in range(batch_label.shape[0]):
+ test_SRFBN(batch_lrimage[i],batch_label[i])
+ srfbn.sess.close()
+
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py
new file mode 100644
index 000000000..0d83abd5e
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py
@@ -0,0 +1,508 @@
+from npu_bridge.npu_init import *
+import numpy as np
+import scipy, cv2
+from scipy import fftpack
+
+
+'''
+modified by Kai Zhang (github: https://github.com/cszn)
+03/03/2019
+'''
+
+
+# --------------------------------
+# get rho and sigma
+# --------------------------------
+def get_rho_sigma(sigma=2.55/255, iter_num=15):
+ '''
+ Kai Zhang (github: https://github.com/cszn)
+ 03/03/2019
+ '''
+ modelSigma1 = 49.0
+ modelSigma2 = 2.55
+ modelSigmaS = np.logspace(np.log10(modelSigma1), np.log10(modelSigma2), iter_num)
+ sigmas = modelSigmaS/255.
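+    # sigmas decays log-uniformly from modelSigma1/255 down to modelSigma2/255 over iter_num steps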
+ mus = list(map(lambda x: (sigma**2)/(x**2)/3, sigmas))
+ rhos = mus
+ return rhos, sigmas
+
+
+# --------------------------------
+# HWC, get uperleft and denominator
+# --------------------------------
+def get_uperleft_denominator(img, kernel):
+ '''
+ Kai Zhang (github: https://github.com/cszn)
+ 03/03/2019
+ '''
+ V = psf2otf(kernel, img.shape[:2]) # discrete fourier transform of kernel
+ denominator = np.expand_dims(np.abs(V)**2, axis=2) # Fourier transform of K transpose * K
+ upperleft = np.expand_dims(np.conj(V), axis=2) * np.fft.fft2(img, axes=[0, 1])
+ return upperleft, denominator
+
+
+# otf2psf: not sure where I got this one from. Maybe translated from Octave source code or whatever. It's just math.
+def otf2psf(otf, outsize=None):
+ insize = np.array(otf.shape)
+ psf = np.fft.ifftn(otf, axes=(0, 1))
+ for axis, axis_size in enumerate(insize):
+ psf = np.roll(psf, np.floor(axis_size / 2).astype(int), axis=axis)
+ if type(outsize) != type(None):
+ insize = np.array(otf.shape)
+ outsize = np.array(outsize)
+ n = max(np.size(outsize), np.size(insize))
+ # outsize = postpad(outsize(:), n, 1);
+ # insize = postpad(insize(:) , n, 1);
+ colvec_out = outsize.flatten().reshape((np.size(outsize), 1))
+ colvec_in = insize.flatten().reshape((np.size(insize), 1))
+ outsize = np.pad(colvec_out, ((0, max(0, n - np.size(colvec_out))), (0, 0)), mode="constant")
+ insize = np.pad(colvec_in, ((0, max(0, n - np.size(colvec_in))), (0, 0)), mode="constant")
+
+ pad = (insize - outsize) / 2
+ if np.any(pad < 0):
+ print("otf2psf error: OUTSIZE must be smaller than or equal than OTF size")
+ prepad = np.floor(pad)
+ postpad = np.ceil(pad)
+ dims_start = prepad.astype(int)
+ dims_end = (insize - postpad).astype(int)
+ for i in range(len(dims_start.shape)):
+ psf = np.take(psf, range(dims_start[i][0], dims_end[i][0]), axis=i)
+ n_ops = np.sum(otf.size * np.log2(otf.shape))
+ psf = np.real_if_close(psf, tol=n_ops)
+ return psf
+
+
+# psf2otf copied/modified from https://github.com/aboucaud/pypher/blob/master/pypher/pypher.py
+def psf2otf(psf, shape=None):
+ """
+ Convert point-spread function to optical transfer function.
+ Compute the Fast Fourier Transform (FFT) of the point-spread
+ function (PSF) array and creates the optical transfer function (OTF)
+ array that is not influenced by the PSF off-centering.
+ By default, the OTF array is the same size as the PSF array.
+ To ensure that the OTF is not altered due to PSF off-centering, PSF2OTF
+ post-pads the PSF array (down or to the right) with zeros to match
+ dimensions specified in OUTSIZE, then circularly shifts the values of
+ the PSF array up (or to the left) until the central pixel reaches (1,1)
+ position.
+ Parameters
+ ----------
+ psf : `numpy.ndarray`
+ PSF array
+ shape : int
+ Output shape of the OTF array
+ Returns
+ -------
+ otf : `numpy.ndarray`
+ OTF array
+ Notes
+ -----
+ Adapted from MATLAB psf2otf function
+ """
+ if type(shape) == type(None):
+ shape = psf.shape
+ shape = np.array(shape)
+ if np.all(psf == 0):
+ # return np.zeros_like(psf)
+ return np.zeros(shape)
+ if len(psf.shape) == 1:
+ psf = psf.reshape((1, psf.shape[0]))
+ inshape = psf.shape
+ psf = zero_pad(psf, shape, position='corner')
+ for axis, axis_size in enumerate(inshape):
+ psf = np.roll(psf, -int(axis_size / 2), axis=axis)
+ # Compute the OTF
+ otf = np.fft.fft2(psf, axes=(0, 1))
+ # Estimate the rough number of operations involved in the FFT
+ # and discard the PSF imaginary part if within roundoff error
+ # roundoff error = machine epsilon = sys.float_info.epsilon
+ # or np.finfo().eps
+ n_ops = np.sum(psf.size * np.log2(psf.shape))
+ otf = np.real_if_close(otf, tol=n_ops)
+ return otf
+
+
+def zero_pad(image, shape, position='corner'):
+ """
+ Extends image to a certain size with zeros
+ Parameters
+ ----------
+ image: real 2d `numpy.ndarray`
+ Input image
+ shape: tuple of int
+ Desired output shape of the image
+ position : str, optional
+ The position of the input image in the output one:
+ * 'corner'
+ top-left corner (default)
+ * 'center'
+ centered
+ Returns
+ -------
+ padded_img: real `numpy.ndarray`
+ The zero-padded image
+ """
+ shape = np.asarray(shape, dtype=int)
+ imshape = np.asarray(image.shape, dtype=int)
+ if np.alltrue(imshape == shape):
+ return image
+ if np.any(shape <= 0):
+ raise ValueError("ZERO_PAD: null or negative shape given")
+ dshape = shape - imshape
+ if np.any(dshape < 0):
+ raise ValueError("ZERO_PAD: target size smaller than source one")
+ pad_img = np.zeros(shape, dtype=image.dtype)
+ idx, idy = np.indices(imshape)
+ if position == 'center':
+ if np.any(dshape % 2 != 0):
+ raise ValueError("ZERO_PAD: source and target shapes "
+ "have different parity.")
+ offx, offy = dshape // 2
+ else:
+ offx, offy = (0, 0)
+ pad_img[idx + offx, idy + offy] = image
+ return pad_img
+
+
+'''
+Reducing boundary artifacts
+'''
+
+
+def opt_fft_size(n):
+ '''
+ Kai Zhang (github: https://github.com/cszn)
+ 03/03/2019
+ # opt_fft_size.m
+ # compute an optimal data length for Fourier transforms
+ # written by Sunghyun Cho (sodomau@postech.ac.kr)
+ # persistent opt_fft_size_LUT;
+ '''
+
+ LUT_size = 2048
+ # print("generate opt_fft_size_LUT")
+ opt_fft_size_LUT = np.zeros(LUT_size)
+
+ e2 = 1
+ while e2 <= LUT_size:
+ e3 = e2
+ while e3 <= LUT_size:
+ e5 = e3
+ while e5 <= LUT_size:
+ e7 = e5
+ while e7 <= LUT_size:
+ if e7 <= LUT_size:
+ opt_fft_size_LUT[e7-1] = e7
+ if e7*11 <= LUT_size:
+ opt_fft_size_LUT[e7*11-1] = e7*11
+ if e7*13 <= LUT_size:
+ opt_fft_size_LUT[e7*13-1] = e7*13
+ e7 = e7 * 7
+ e5 = e5 * 5
+ e3 = e3 * 3
+ e2 = e2 * 2
+
+ nn = 0
+ for i in range(LUT_size, 0, -1):
+ if opt_fft_size_LUT[i-1] != 0:
+ nn = i-1
+ else:
+ opt_fft_size_LUT[i-1] = nn+1
+
+ m = np.zeros(len(n))
+ for c in range(len(n)):
+ nn = n[c]
+ if nn <= LUT_size:
+ m[c] = opt_fft_size_LUT[nn-1]
+ else:
+ m[c] = -1
+ return m
+
+
+def wrap_boundary_liu(img, img_size):
+
+ """
+ Reducing boundary artifacts in image deconvolution
+ Renting Liu, Jiaya Jia
+ ICIP 2008
+ """
+ if img.ndim == 2:
+ ret = wrap_boundary(img, img_size)
+ elif img.ndim == 3:
+ ret = [wrap_boundary(img[:, :, i], img_size) for i in range(3)]
+ ret = np.stack(ret, 2)
+ return ret
+
+
+def wrap_boundary(img, img_size):
+
+ """
+ python code from:
+ https://github.com/ys-koshelev/nla_deblur/blob/90fe0ab98c26c791dcbdf231fe6f938fca80e2a0/boundaries.py
+ Reducing boundary artifacts in image deconvolution
+ Renting Liu, Jiaya Jia
+ ICIP 2008
+ """
+ (H, W) = np.shape(img)
+ H_w = int(img_size[0]) - H
+ W_w = int(img_size[1]) - W
+
+ # ret = np.zeros((img_size[0], img_size[1]));
+ alpha = 1
+ HG = img[:, :]
+
+ r_A = np.zeros((alpha*2+H_w, W))
+ r_A[:alpha, :] = HG[-alpha:, :]
+ r_A[-alpha:, :] = HG[:alpha, :]
+ a = np.arange(H_w)/(H_w-1)
+ # r_A(alpha+1:end-alpha, 1) = (1-a)*r_A(alpha,1) + a*r_A(end-alpha+1,1)
+ r_A[alpha:-alpha, 0] = (1-a)*r_A[alpha-1, 0] + a*r_A[-alpha, 0]
+ # r_A(alpha+1:end-alpha, end) = (1-a)*r_A(alpha,end) + a*r_A(end-alpha+1,end)
+ r_A[alpha:-alpha, -1] = (1-a)*r_A[alpha-1, -1] + a*r_A[-alpha, -1]
+
+ r_B = np.zeros((H, alpha*2+W_w))
+ r_B[:, :alpha] = HG[:, -alpha:]
+ r_B[:, -alpha:] = HG[:, :alpha]
+ a = np.arange(W_w)/(W_w-1)
+ r_B[0, alpha:-alpha] = (1-a)*r_B[0, alpha-1] + a*r_B[0, -alpha]
+ r_B[-1, alpha:-alpha] = (1-a)*r_B[-1, alpha-1] + a*r_B[-1, -alpha]
+
+ if alpha == 1:
+ A2 = solve_min_laplacian(r_A[alpha-1:, :])
+ B2 = solve_min_laplacian(r_B[:, alpha-1:])
+ r_A[alpha-1:, :] = A2
+ r_B[:, alpha-1:] = B2
+ else:
+ A2 = solve_min_laplacian(r_A[alpha-1:-alpha+1, :])
+ r_A[alpha-1:-alpha+1, :] = A2
+ B2 = solve_min_laplacian(r_B[:, alpha-1:-alpha+1])
+ r_B[:, alpha-1:-alpha+1] = B2
+ A = r_A
+ B = r_B
+
+ r_C = np.zeros((alpha*2+H_w, alpha*2+W_w))
+ r_C[:alpha, :] = B[-alpha:, :]
+ r_C[-alpha:, :] = B[:alpha, :]
+ r_C[:, :alpha] = A[:, -alpha:]
+ r_C[:, -alpha:] = A[:, :alpha]
+
+ if alpha == 1:
+        C2 = solve_min_laplacian(r_C[alpha-1:, alpha-1:])
+ r_C[alpha-1:, alpha-1:] = C2
+ else:
+ C2 = solve_min_laplacian(r_C[alpha-1:-alpha+1, alpha-1:-alpha+1])
+ r_C[alpha-1:-alpha+1, alpha-1:-alpha+1] = C2
+ C = r_C
+ # return C
+ A = A[alpha-1:-alpha-1, :]
+ B = B[:, alpha:-alpha]
+ C = C[alpha:-alpha, alpha:-alpha]
+ ret = np.vstack((np.hstack((img, B)), np.hstack((A, C))))
+ return ret
+
+
+def solve_min_laplacian(boundary_image):
+ (H, W) = np.shape(boundary_image)
+
+ # Laplacian
+ f = np.zeros((H, W))
+ # boundary image contains image intensities at boundaries
+ boundary_image[1:-1, 1:-1] = 0
+ j = np.arange(2, H)-1
+ k = np.arange(2, W)-1
+ f_bp = np.zeros((H, W))
+ f_bp[np.ix_(j, k)] = -4*boundary_image[np.ix_(j, k)] + boundary_image[np.ix_(j, k+1)] + boundary_image[np.ix_(j, k-1)] + boundary_image[np.ix_(j-1, k)] + boundary_image[np.ix_(j+1, k)]
+
+ del(j, k)
+ f1 = f - f_bp # subtract boundary points contribution
+ del(f_bp, f)
+
+ # DST Sine Transform algo starts here
+ f2 = f1[1:-1,1:-1]
+ del(f1)
+
+ # compute sine tranform
+ if f2.shape[1] == 1:
+ tt = fftpack.dst(f2, type=1, axis=0)/2
+ else:
+ tt = fftpack.dst(f2, type=1)/2
+
+ if tt.shape[0] == 1:
+ f2sin = np.transpose(fftpack.dst(np.transpose(tt), type=1, axis=0)/2)
+ else:
+ f2sin = np.transpose(fftpack.dst(np.transpose(tt), type=1)/2)
+ del(f2)
+
+ # compute Eigen Values
+ [x, y] = np.meshgrid(np.arange(1, W-1), np.arange(1, H-1))
+ denom = (2*np.cos(np.pi*x/(W-1))-2) + (2*np.cos(np.pi*y/(H-1)) - 2)
+
+ # divide
+ f3 = f2sin/denom
+ del(f2sin, x, y)
+
+ # compute Inverse Sine Transform
+ if f3.shape[0] == 1:
+ tt = fftpack.idst(f3*2, type=1, axis=1)/(2*(f3.shape[1]+1))
+ else:
+ tt = fftpack.idst(f3*2, type=1, axis=0)/(2*(f3.shape[0]+1))
+ del(f3)
+ if tt.shape[1] == 1:
+ img_tt = np.transpose(fftpack.idst(np.transpose(tt)*2, type=1)/(2*(tt.shape[0]+1)))
+ else:
+ img_tt = np.transpose(fftpack.idst(np.transpose(tt)*2, type=1, axis=0)/(2*(tt.shape[1]+1)))
+ del(tt)
+
+ # put solution in inner points; outer points obtained from boundary image
+ img_direct = boundary_image
+ img_direct[1:-1, 1:-1] = 0
+ img_direct[1:-1, 1:-1] = img_tt
+ return img_direct
+
+
+"""
+Created on Thu Jan 18 15:36:32 2018
+@author: italo
+https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
+"""
+
+"""
+Syntax
+h = fspecial(type)
+h = fspecial('average',hsize)
+h = fspecial('disk',radius)
+h = fspecial('gaussian',hsize,sigma)
+h = fspecial('laplacian',alpha)
+h = fspecial('log',hsize,sigma)
+h = fspecial('motion',len,theta)
+h = fspecial('prewitt')
+h = fspecial('sobel')
+"""
+
+
+def fspecial_average(hsize=3):
+ """Smoothing filter"""
+ return np.ones((hsize, hsize))/hsize**2
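+# e.g. fspecial_average(3) returns a 3x3 kernel with every entry 1/9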
+
+
+def fspecial_disk(radius):
+ """Disk filter"""
+ # raise(NotImplemented)
+ # rad = 0.6
+ rad = radius
+ crad = np.ceil(rad-0.5)
+ [x, y] = np.meshgrid(np.arange(-crad, crad+1), np.arange(-crad, crad+1))
+ maxxy = np.zeros(x.shape)
+ maxxy[abs(x) >= abs(y)] = abs(x)[abs(x) >= abs(y)]
+ maxxy[abs(y) >= abs(x)] = abs(y)[abs(y) >= abs(x)]
+ minxy = np.zeros(x.shape)
+ minxy[abs(x) <= abs(y)] = abs(x)[abs(x) <= abs(y)]
+ minxy[abs(y) <= abs(x)] = abs(y)[abs(y) <= abs(x)]
+ m1 = (rad**2 < (maxxy+0.5)**2 + (minxy-0.5)**2)*(minxy-0.5) +\
+ (rad**2 >= (maxxy+0.5)**2 + (minxy-0.5)**2)*\
+ np.sqrt((rad**2 + 0j) - (maxxy + 0.5)**2)
+ m2 = (rad**2 > (maxxy-0.5)**2 + (minxy+0.5)**2)*(minxy+0.5) +\
+ (rad**2 <= (maxxy-0.5)**2 + (minxy+0.5)**2)*\
+ np.sqrt((rad**2 + 0j) - (maxxy - 0.5)**2)
+ # sgrid = (rad**2 * (0.5*(asin(m2/rad) - asin(m1/rad)) + 0.25*(sin(2*asin(m2/rad)) - )
+    h = None  # the disk filter is left unimplemented and returns None
+ return h
+
+
+def fspecial_gaussian(hsize, sigma):
+ hsize = [hsize, hsize]
+ siz = [(hsize[0]-1.0)/2.0, (hsize[1]-1.0)/2.0]
+ std = sigma
+ [x, y] = np.meshgrid(np.arange(-siz[1], siz[1]+1), np.arange(-siz[0], siz[0]+1))
+ arg = -(x*x + y*y)/(2*std*std)
+ h = np.exp(arg)
+    h[h < np.finfo(float).eps * h.max()] = 0
+ sumh = h.sum()
+ if sumh != 0:
+ h = h/sumh
+ return h
+
+
+
+def fspecial(filter_type, *args, **kwargs):
+ '''
+ python code from:
+ https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
+ '''
+ if filter_type == 'average':
+ return fspecial_average(*args, **kwargs)
+ if filter_type == 'disk':
+ return fspecial_disk(*args, **kwargs)
+ if filter_type == 'gaussian':
+ return fspecial_gaussian(*args, **kwargs)
+
+
+def traditional_blur(img, filter_type):
+    ksize = int(np.random.choice([3, 5]))  # random odd kernel size
+    # ksize = 3
+    if filter_type == 'average':
+        img = np.array(img, np.float32)
+        blur_img = cv2.medianBlur(img, ksize=ksize)  # note: applies a median blur despite the 'average' name
+        return blur_img
+    if filter_type == 'gaussian':
+        blur_img = cv2.GaussianBlur(img, ksize=(ksize, ksize), sigmaX=0, sigmaY=0)
+        return blur_img
+
+def motion_blur(img, angle=45):
+    degree = int(np.random.randint(1, 8))  # a degree up to 8 is enough; blur strength is random
+    image = np.array(img)
+
+    # build a motion-blur kernel at an arbitrary angle; the larger the degree, the stronger the blur
+    M = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)
+    motion_blur_kernel = np.diag(np.ones(degree))
+    motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, (degree, degree))
+
+    motion_blur_kernel = motion_blur_kernel / degree
+    blurred = cv2.filter2D(image, -1, motion_blur_kernel)
+
+    # normalize to [0, 255], then rescale to roughly [-1, 1]
+    cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
+    blurred = (blurred-127.5)/128
+    return blurred
+
+
+if __name__ == '__main__':
+ a = opt_fft_size([111])
+ print(a)
+
+ print(fspecial('gaussian', 5, 1))
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
new file mode 100644
index 000000000..58a4e6660
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
@@ -0,0 +1,124 @@
+
+
+
+from npu_bridge.npu_init import *
+from SRFBN_model import SRFBN
+import tensorflow as tf
+import time
+import os
+from PreProcess import *
+from skimage.metrics import peak_signal_noise_ratio as comparepsnr
+from skimage.metrics import structural_similarity
+
+def train_SRFBN(dataset, sess, cfg):
+    # build the model and optimizer, then run the training loop
+ with tf.device('/cpu:0'):
+ step = tf.Variable(0, trainable=False)
+ srfbn = SRFBN(sess=sess, cfg=cfg)
+ srfbn.train_step()
+ out = tf.add_n(srfbn.outs) / srfbn.cfg.num_steps
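+    # SRFBN emits one reconstruction per feedback step; averaging them gives
+    # the final output (num_steps is 4 in config.py)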
+ ## build Optimizer
+    # let the learning rate differ across training stages
+ boundaries = [len(dataset)*epoch//cfg.batchsize for epoch in cfg.lr_steps]
+ values = [cfg.learning_rate*(cfg.lr_gama**i) for i in range(len(cfg.lr_steps)+1)]
+ lr = tf.train.piecewise_constant(step, boundaries, values)
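+    # e.g. with lr_steps=[150, 300, 550, 750] and lr_gama=1 (from config.py)
+    # the schedule stays flat at learning_rate; a hypothetical lr_gama=0.5
+    # would halve the rate past each boundary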
+ update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+ optimizer = tf.train.AdamOptimizer(learning_rate=lr)
+ with tf.control_dependencies(update_ops):
+ gs_vs = optimizer.compute_gradients(srfbn.losses)
+ with tf.device('/cpu:0'):
+ train_op = optimizer.apply_gradients(grads_and_vars=gs_vs, global_step=step)
+
+ tf.global_variables_initializer().run(session=sess)
+
+ summary_writer = tf.summary.FileWriter(cfg.srfbn_logdir, srfbn.sess.graph)
+    # load a pre-trained model if configured
+ if srfbn.cfg.load_premodel:
+ counter = srfbn.load()
+ else:
+ counter = 0
+ time_ = time.time()
+ print("\nNow Start Training...\n")
+ global_step = 0
+ for ep in range(cfg.epoch):
+
+        # pick images in a random order on every epoch
+        pic_idx = np.random.permutation(len(dataset))
+        picid = 0
+        # load five images at a time
+ for i in range(0,len(dataset),5):
+ index = []
+ for j in range(5):
+ index.append(pic_idx[i+j])
+ imgnames = []
+ for pic in index:
+ imgnames.append(dataset[pic])
+ picid += 5
+ print(imgnames)
+ batch_labels, batch_images = preprocess(imgnames, cfg)
+ patch_idx = list(range(len(batch_labels)))
+            # pad the patch list so its length is an exact multiple of batchsize
+ if len(patch_idx) % cfg.batchsize != 0:
+ patch_idx.extend(list(np.random.choice(patch_idx,
+ cfg.batchsize * ((len(patch_idx) // cfg.batchsize)+1) - len(patch_idx))))
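+            # e.g. 13 patches with batchsize 4 would be padded to 16 by
+            # re-sampling 3 random patches (illustrative numbers)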
+
+            patch_idx = np.random.permutation(patch_idx)
+            iterations = len(patch_idx) // cfg.batchsize
+
+            for it in range(iterations):
+                idx = list(patch_idx[it * cfg.batchsize: (it+1) * cfg.batchsize])
+                patch_labels = np.array(batch_labels)[idx]
+                patch_images = np.array(batch_images)[idx]
+                output, _, loss, l2_loss = srfbn.sess.run(
+                    [out, train_op, srfbn.losses, srfbn.l2_regularization_loss],
+                    feed_dict={srfbn.imageplaceholder: patch_images,
+                               srfbn.labelplaceholder: patch_labels})
+                output = output[0] * 128 + 127.5
+                # labels were normalized in preprocess(), so map them back to
+                # the [0, 255] range before computing PSNR/SSIM
+                img_hr = patch_labels.reshape([srfbn.cfg.imagesize * srfbn.cfg.scale,
+                                               srfbn.cfg.imagesize * srfbn.cfg.scale, 3]) * 128 + 127.5
+                PSNR = comparepsnr(output, img_hr, data_range=255)
+                ssim = structural_similarity(output, img_hr, win_size=11, data_range=255,
+                                             multichannel=True)
+
+ if it % 10 == 0:
+ print("Epoch:%2d, pic:%d, step:%2d, global_step:%d, time :%4.4f, loss:%.8f, l2_loss:%.8f, PSNR:%.8f, SSIM:%.8f" % (
+ (ep + 1),picid, it,global_step,time.time() - time_, loss,l2_loss,PSNR,ssim))
+ if it % 100 == 0:
+ srfbn.save(counter)
+ summary_str = srfbn.sess.run(srfbn.merged_summary,
+ feed_dict={srfbn.imageplaceholder: patch_images,
+ srfbn.labelplaceholder: patch_labels})
+ summary_writer.add_summary(summary_str, counter)
+
+ global_step += 1
+ counter += 1
+
+# training entry point
+def train(*args, **kwargs):
+ data_dir = kwargs["data_dir"]
+ imgs = [os.path.join(data_dir,data) for data in os.listdir(data_dir)]
+
+    sess = tf.compat.v1.Session(config=npu_config_proto())
+
+    ## build NetWork
+    from config import SRFBN_config
+    cfg = SRFBN_config()
+    dataset = imgs
+    train_SRFBN(dataset, sess, cfg)
+
+
+
+if __name__ == '__main__':
+    data_dir = "/home/TestUser08/BUAA/output_npu_20221021153629/SRFBN-tensorflow_npu_20221021153629/Resolution_2K/DIV2K/DIV2K_train_HR"
+    train(data_dir=data_dir)
+
--
Gitee
From c96ebeeb97ea4f6dd71e8833c12eaf19bedd0191 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 03:12:02 +0000
Subject: [PATCH 03/26] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20test?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/.keep | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/.keep
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/.keep b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/.keep
new file mode 100644
index 000000000..e69de29bb
--
Gitee
From de091f72fa9f8f07fe9b5284de8d6772d931c2ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 03:12:25 +0000
Subject: [PATCH 04/26] =?UTF-8?q?=E6=80=A7=E8=83=BD=E8=AE=AD=E7=BB=83?=
=?UTF-8?q?=E8=84=9A=E6=9C=AC?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../test/train_performance_1p.sh | 153 ++++++++++++++++++
1 file changed, 153 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh
new file mode 100644
index 000000000..5882b5f4f
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+
+##########################################################
+######### Lines 3 to 100: DO NOT modify #########
+######### Lines 3 to 100: DO NOT modify #########
+######### Lines 3 to 100: DO NOT modify #########
+##########################################################
+# directory containing this shell script
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# determine whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# name of the network being run
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# initialize path parameters
+data_path=""
+output_path=""
+
+# help message, do not modify
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage:./train_performance_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --data_path # dataset of training
+ --output_path # output of training
+ --train_steps # max_step for training
+ --train_epochs # max_epoch for training
+ --batch_size # batch size
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# parse arguments, do not modify
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --output_path* ]];then
+ output_path=`echo ${para#*=}`
+ elif [[ $para == --train_steps* ]];then
+ train_steps=`echo ${para#*=}`
+ elif [[ $para == --train_epochs* ]];then
+ train_epochs=`echo ${para#*=}`
+ elif [[ $para == --batch_size* ]];then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# verify that data_path was provided, do not modify
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be config"
+ exit 1
+fi
+
+# check whether output_path was provided, do not modify
+if [[ $output_path == "" ]];then
+ output_path="./output/${ASCEND_DEVICE_ID}"
+fi
+
+CaseName=""
+function get_casename()
+{
+ if [ x"${perf_flag}" = x1 ];
+ then
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+ else
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+ fi
+}
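+# e.g. with batch_size=1 this yields a CaseName like
+# SRFBN_for_TensorFlow_bs1_1p_perf (illustrative; Network is derived from the directory name)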
+
+# change to the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# record the training start time, do not modify
+start_time=$(date +%s)
+##########################################################
+######### Lines 3 to 90: DO NOT modify #########
+######### Lines 3 to 90: DO NOT modify #########
+######### Lines 3 to 90: DO NOT modify #########
+##########################################################
+
+#=========================================================
+#=========================================================
+#======== training command; adapt to your network ========
+#=========================================================
+#=========================================================
+# basic parameters; review and adjust for your model
+# your training dataset lives under ${data_path}; use that variable directly
+# your training output directory is ${output_path}; use that variable directly
+# other basic parameters may be added, but keep batch_size and set it correctly
+train_epochs=1
+batch_size=1
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+python3 ./train.py > ${print_log}
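+# note: train.py currently reads its data directory from the hard-coded path in
+# its __main__ block and does not yet consume ${data_path}/${output_path}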
+# performance metrics
+StepTime=`((cat ${print_log} | grep "time" | head -n 1) && (cat ${print_log} | grep "time" | tail -n 1)) | awk -F ':' '{print $5 $6 }' | awk -F ',' '{print $1 $2}' | awk -F ' ' '{print $1;print $3}' | awk '{if (NR == 1){a=$1} else if (NR == 2){b=$1} else if (NR == 3){c=$1} else if (NR == 4){d=$1}} END {print (d-b)/(c-a)}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
+# PSNR computation
+PSNR=`cat ${print_log} | grep "time" | tail -n 10 | awk -F ',' '{print $8}' | awk -F ':' '{sum+=$2} END {print sum/NR}'`
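+# The pipelines above assume the log format printed by train.py, e.g.
+#   Epoch: 1, pic:5, step: 0, global_step:100, time :12.34, loss:..., PSNR:..., SSIM:...
+# StepTime is estimated as (time_last - time_first)/(global_step_last - global_step_first),
+# and FPS = batch_size / StepTime.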
+# extract every loss line
+grep "loss:" ${print_log} | awk -F "," '{print $6}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+
+###########################################################
+######### do not modify anything below #########
+######### do not modify anything below #########
+######### do not modify anything below #########
+###########################################################
+
+# get the final casename; keep this, the case file is named ${CaseName}
+get_casename
+
+# rename the loss file
+if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ];
+then
+ mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt
+fi
+
+# end-to-end training time
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+echo "------------------ Final result ------------------"
+# report FPS / per-step time / end-to-end time
+echo "Final Performance images/sec : $FPS"
+echo "Final Performance sec/step : $StepTime"
+echo "E2E Training Duration sec : $e2e_time"
+echo "PSNR : $PSNR"
+# report training accuracy
+#echo "Final Train Accuracy : ${train_accuracy}"
+
+# loss of the last iteration, do not modify
+ActualLoss=(`awk 'END {print $NF}' ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt`)
+
+# key information is written to ${CaseName}.log, do not modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
--
Gitee
From 4e08e4d21a69d974fcfd445d821e7b7a66f2191d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:02:51 +0000
Subject: [PATCH 05/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/Basic_Model.py | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
index e902cf5b6..3051156e8 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
@@ -1,3 +1,31 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from npu_bridge.npu_init import *
import tensorflow as tf
from tensorflow.contrib import layers
--
Gitee
From 2ae87e9e290ed24113405e05786a48604ac29972 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:03:30 +0000
Subject: [PATCH 06/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/PreProcess.py | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
index c38999b3c..0d13c7cac 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
@@ -1,3 +1,31 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from npu_bridge.npu_init import *
import cv2
import numpy as np
--
Gitee
From ca798014da0ed3949eec721d1bd738d82c2395ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:03:54 +0000
Subject: [PATCH 07/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/SRFBN_model.py | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
index a3e01bac3..1ce90c923 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
@@ -1,3 +1,31 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from npu_bridge.npu_init import *
import tensorflow as tf
import os
--
Gitee
From b5171ef392a47d59f534ad685709804bd5c4c970 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:04:16 +0000
Subject: [PATCH 08/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../contrib/cv/SRFBN_for_TensorFlow/config.py | 28 +++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
index d6d3b5592..4da2aec8f 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
@@ -1,3 +1,31 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
from npu_bridge.npu_init import *
import os
--
Gitee
From 195976f53bff13e0b6c7cbb73d92ace54d07081e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:04:45 +0000
Subject: [PATCH 09/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../contrib/cv/SRFBN_for_TensorFlow/test.py | 27 +++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
index 57457c50a..66419630c 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
@@ -1,3 +1,30 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from npu_bridge.npu_init import *
import tensorflow as tf
--
Gitee
From 360200965be0acf509adf0d3288d13c64d02f0df Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:05:06 +0000
Subject: [PATCH 10/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../contrib/cv/SRFBN_for_TensorFlow/train.py | 28 ++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
index 58a4e6660..58e3a8272 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
@@ -1,4 +1,30 @@
-
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from npu_bridge.npu_init import *
--
Gitee
From fe9961e780567f1b208fe46c23ffa896673da98f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:05:28 +0000
Subject: [PATCH 11/26] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SRFBN=5Ffor=5FTensorFlow/traditional=5Fblur?=
=?UTF-8?q?.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../SRFBN_for_TensorFlow/traditional_blur.py | 508 ------------------
1 file changed, 508 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py
deleted file mode 100644
index 0d83abd5e..000000000
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/traditional_blur.py
+++ /dev/null
@@ -1,508 +0,0 @@
-from npu_bridge.npu_init import *
-import numpy as np
-import scipy, cv2
-from scipy import fftpack
-
-
-'''
-modified by Kai Zhang (github: https://github.com/cszn)
-03/03/2019
-'''
-
-
-# --------------------------------
-# get rho and sigma
-# --------------------------------
-def get_rho_sigma(sigma=2.55/255, iter_num=15):
- '''
- Kai Zhang (github: https://github.com/cszn)
- 03/03/2019
- '''
- modelSigma1 = 49.0
- modelSigma2 = 2.55
- modelSigmaS = np.logspace(np.log10(modelSigma1), np.log10(modelSigma2), iter_num)
- sigmas = modelSigmaS/255.
- mus = list(map(lambda x: (sigma**2)/(x**2)/3, sigmas))
- rhos = mus
- return rhos, sigmas
-
-
-# --------------------------------
-# HWC, get uperleft and denominator
-# --------------------------------
-def get_uperleft_denominator(img, kernel):
- '''
- Kai Zhang (github: https://github.com/cszn)
- 03/03/2019
- '''
- V = psf2otf(kernel, img.shape[:2]) # discrete fourier transform of kernel
- denominator = np.expand_dims(np.abs(V)**2, axis=2) # Fourier transform of K transpose * K
- upperleft = np.expand_dims(np.conj(V), axis=2) * np.fft.fft2(img, axes=[0, 1])
- return upperleft, denominator
-
-
-# otf2psf: not sure where I got this one from. Maybe translated from Octave source code or whatever. It's just math.
-def otf2psf(otf, outsize=None):
- insize = np.array(otf.shape)
- psf = np.fft.ifftn(otf, axes=(0, 1))
- for axis, axis_size in enumerate(insize):
- psf = np.roll(psf, np.floor(axis_size / 2).astype(int), axis=axis)
- if type(outsize) != type(None):
- insize = np.array(otf.shape)
- outsize = np.array(outsize)
- n = max(np.size(outsize), np.size(insize))
- # outsize = postpad(outsize(:), n, 1);
- # insize = postpad(insize(:) , n, 1);
- colvec_out = outsize.flatten().reshape((np.size(outsize), 1))
- colvec_in = insize.flatten().reshape((np.size(insize), 1))
- outsize = np.pad(colvec_out, ((0, max(0, n - np.size(colvec_out))), (0, 0)), mode="constant")
- insize = np.pad(colvec_in, ((0, max(0, n - np.size(colvec_in))), (0, 0)), mode="constant")
-
- pad = (insize - outsize) / 2
- if np.any(pad < 0):
- print("otf2psf error: OUTSIZE must be smaller than or equal than OTF size")
- prepad = np.floor(pad)
- postpad = np.ceil(pad)
- dims_start = prepad.astype(int)
- dims_end = (insize - postpad).astype(int)
- for i in range(len(dims_start.shape)):
- psf = np.take(psf, range(dims_start[i][0], dims_end[i][0]), axis=i)
- n_ops = np.sum(otf.size * np.log2(otf.shape))
- psf = np.real_if_close(psf, tol=n_ops)
- return psf
-
-
-# psf2otf copied/modified from https://github.com/aboucaud/pypher/blob/master/pypher/pypher.py
-def psf2otf(psf, shape=None):
- """
- Convert point-spread function to optical transfer function.
- Compute the Fast Fourier Transform (FFT) of the point-spread
- function (PSF) array and creates the optical transfer function (OTF)
- array that is not influenced by the PSF off-centering.
- By default, the OTF array is the same size as the PSF array.
- To ensure that the OTF is not altered due to PSF off-centering, PSF2OTF
- post-pads the PSF array (down or to the right) with zeros to match
- dimensions specified in OUTSIZE, then circularly shifts the values of
- the PSF array up (or to the left) until the central pixel reaches (1,1)
- position.
- Parameters
- ----------
- psf : `numpy.ndarray`
- PSF array
- shape : int
- Output shape of the OTF array
- Returns
- -------
- otf : `numpy.ndarray`
- OTF array
- Notes
- -----
- Adapted from MATLAB psf2otf function
- """
- if type(shape) == type(None):
- shape = psf.shape
- shape = np.array(shape)
- if np.all(psf == 0):
- # return np.zeros_like(psf)
- return np.zeros(shape)
- if len(psf.shape) == 1:
- psf = psf.reshape((1, psf.shape[0]))
- inshape = psf.shape
- psf = zero_pad(psf, shape, position='corner')
- for axis, axis_size in enumerate(inshape):
- psf = np.roll(psf, -int(axis_size / 2), axis=axis)
- # Compute the OTF
- otf = np.fft.fft2(psf, axes=(0, 1))
- # Estimate the rough number of operations involved in the FFT
- # and discard the PSF imaginary part if within roundoff error
- # roundoff error = machine epsilon = sys.float_info.epsilon
- # or np.finfo().eps
- n_ops = np.sum(psf.size * np.log2(psf.shape))
- otf = np.real_if_close(otf, tol=n_ops)
- return otf
-
-
-def zero_pad(image, shape, position='corner'):
- """
- Extends image to a certain size with zeros
- Parameters
- ----------
- image: real 2d `numpy.ndarray`
- Input image
- shape: tuple of int
- Desired output shape of the image
- position : str, optional
- The position of the input image in the output one:
- * 'corner'
- top-left corner (default)
- * 'center'
- centered
- Returns
- -------
- padded_img: real `numpy.ndarray`
- The zero-padded image
- """
- shape = np.asarray(shape, dtype=int)
- imshape = np.asarray(image.shape, dtype=int)
- if np.alltrue(imshape == shape):
- return image
- if np.any(shape <= 0):
- raise ValueError("ZERO_PAD: null or negative shape given")
- dshape = shape - imshape
- if np.any(dshape < 0):
- raise ValueError("ZERO_PAD: target size smaller than source one")
- pad_img = np.zeros(shape, dtype=image.dtype)
- idx, idy = np.indices(imshape)
- if position == 'center':
- if np.any(dshape % 2 != 0):
- raise ValueError("ZERO_PAD: source and target shapes "
- "have different parity.")
- offx, offy = dshape // 2
- else:
- offx, offy = (0, 0)
- pad_img[idx + offx, idy + offy] = image
- return pad_img
-
-
-'''
-Reducing boundary artifacts
-'''
-
-
-def opt_fft_size(n):
- '''
- Kai Zhang (github: https://github.com/cszn)
- 03/03/2019
- # opt_fft_size.m
- # compute an optimal data length for Fourier transforms
- # written by Sunghyun Cho (sodomau@postech.ac.kr)
- # persistent opt_fft_size_LUT;
- '''
-
- LUT_size = 2048
- # print("generate opt_fft_size_LUT")
- opt_fft_size_LUT = np.zeros(LUT_size)
-
- e2 = 1
- while e2 <= LUT_size:
- e3 = e2
- while e3 <= LUT_size:
- e5 = e3
- while e5 <= LUT_size:
- e7 = e5
- while e7 <= LUT_size:
- if e7 <= LUT_size:
- opt_fft_size_LUT[e7-1] = e7
- if e7*11 <= LUT_size:
- opt_fft_size_LUT[e7*11-1] = e7*11
- if e7*13 <= LUT_size:
- opt_fft_size_LUT[e7*13-1] = e7*13
- e7 = e7 * 7
- e5 = e5 * 5
- e3 = e3 * 3
- e2 = e2 * 2
-
- nn = 0
- for i in range(LUT_size, 0, -1):
- if opt_fft_size_LUT[i-1] != 0:
- nn = i-1
- else:
- opt_fft_size_LUT[i-1] = nn+1
-
- m = np.zeros(len(n))
- for c in range(len(n)):
- nn = n[c]
- if nn <= LUT_size:
- m[c] = opt_fft_size_LUT[nn-1]
- else:
- m[c] = -1
- return m
-
-
-def wrap_boundary_liu(img, img_size):
-
- """
- Reducing boundary artifacts in image deconvolution
- Renting Liu, Jiaya Jia
- ICIP 2008
- """
- if img.ndim == 2:
- ret = wrap_boundary(img, img_size)
- elif img.ndim == 3:
- ret = [wrap_boundary(img[:, :, i], img_size) for i in range(3)]
- ret = np.stack(ret, 2)
- return ret
-
-
-def wrap_boundary(img, img_size):
-
- """
- python code from:
- https://github.com/ys-koshelev/nla_deblur/blob/90fe0ab98c26c791dcbdf231fe6f938fca80e2a0/boundaries.py
- Reducing boundary artifacts in image deconvolution
- Renting Liu, Jiaya Jia
- ICIP 2008
- """
- (H, W) = np.shape(img)
- H_w = int(img_size[0]) - H
- W_w = int(img_size[1]) - W
-
- # ret = np.zeros((img_size[0], img_size[1]));
- alpha = 1
- HG = img[:, :]
-
- r_A = np.zeros((alpha*2+H_w, W))
- r_A[:alpha, :] = HG[-alpha:, :]
- r_A[-alpha:, :] = HG[:alpha, :]
- a = np.arange(H_w)/(H_w-1)
- # r_A(alpha+1:end-alpha, 1) = (1-a)*r_A(alpha,1) + a*r_A(end-alpha+1,1)
- r_A[alpha:-alpha, 0] = (1-a)*r_A[alpha-1, 0] + a*r_A[-alpha, 0]
- # r_A(alpha+1:end-alpha, end) = (1-a)*r_A(alpha,end) + a*r_A(end-alpha+1,end)
- r_A[alpha:-alpha, -1] = (1-a)*r_A[alpha-1, -1] + a*r_A[-alpha, -1]
-
- r_B = np.zeros((H, alpha*2+W_w))
- r_B[:, :alpha] = HG[:, -alpha:]
- r_B[:, -alpha:] = HG[:, :alpha]
- a = np.arange(W_w)/(W_w-1)
- r_B[0, alpha:-alpha] = (1-a)*r_B[0, alpha-1] + a*r_B[0, -alpha]
- r_B[-1, alpha:-alpha] = (1-a)*r_B[-1, alpha-1] + a*r_B[-1, -alpha]
-
- if alpha == 1:
- A2 = solve_min_laplacian(r_A[alpha-1:, :])
- B2 = solve_min_laplacian(r_B[:, alpha-1:])
- r_A[alpha-1:, :] = A2
- r_B[:, alpha-1:] = B2
- else:
- A2 = solve_min_laplacian(r_A[alpha-1:-alpha+1, :])
- r_A[alpha-1:-alpha+1, :] = A2
- B2 = solve_min_laplacian(r_B[:, alpha-1:-alpha+1])
- r_B[:, alpha-1:-alpha+1] = B2
- A = r_A
- B = r_B
-
- r_C = np.zeros((alpha*2+H_w, alpha*2+W_w))
- r_C[:alpha, :] = B[-alpha:, :]
- r_C[-alpha:, :] = B[:alpha, :]
- r_C[:, :alpha] = A[:, -alpha:]
- r_C[:, -alpha:] = A[:, :alpha]
-
- if alpha == 1:
- C2 = C2 = solve_min_laplacian(r_C[alpha-1:, alpha-1:])
- r_C[alpha-1:, alpha-1:] = C2
- else:
- C2 = solve_min_laplacian(r_C[alpha-1:-alpha+1, alpha-1:-alpha+1])
- r_C[alpha-1:-alpha+1, alpha-1:-alpha+1] = C2
- C = r_C
- # return C
- A = A[alpha-1:-alpha-1, :]
- B = B[:, alpha:-alpha]
- C = C[alpha:-alpha, alpha:-alpha]
- ret = np.vstack((np.hstack((img, B)), np.hstack((A, C))))
- return ret
-
-
-def solve_min_laplacian(boundary_image):
- (H, W) = np.shape(boundary_image)
-
- # Laplacian
- f = np.zeros((H, W))
- # boundary image contains image intensities at boundaries
- boundary_image[1:-1, 1:-1] = 0
- j = np.arange(2, H)-1
- k = np.arange(2, W)-1
- f_bp = np.zeros((H, W))
- f_bp[np.ix_(j, k)] = -4*boundary_image[np.ix_(j, k)] + boundary_image[np.ix_(j, k+1)] + boundary_image[np.ix_(j, k-1)] + boundary_image[np.ix_(j-1, k)] + boundary_image[np.ix_(j+1, k)]
-
- del(j, k)
- f1 = f - f_bp # subtract boundary points contribution
- del(f_bp, f)
-
- # DST Sine Transform algo starts here
- f2 = f1[1:-1,1:-1]
- del(f1)
-
- # compute sine tranform
- if f2.shape[1] == 1:
- tt = fftpack.dst(f2, type=1, axis=0)/2
- else:
- tt = fftpack.dst(f2, type=1)/2
-
- if tt.shape[0] == 1:
- f2sin = np.transpose(fftpack.dst(np.transpose(tt), type=1, axis=0)/2)
- else:
- f2sin = np.transpose(fftpack.dst(np.transpose(tt), type=1)/2)
- del(f2)
-
- # compute Eigen Values
- [x, y] = np.meshgrid(np.arange(1, W-1), np.arange(1, H-1))
- denom = (2*np.cos(np.pi*x/(W-1))-2) + (2*np.cos(np.pi*y/(H-1)) - 2)
-
- # divide
- f3 = f2sin/denom
- del(f2sin, x, y)
-
- # compute Inverse Sine Transform
- if f3.shape[0] == 1:
- tt = fftpack.idst(f3*2, type=1, axis=1)/(2*(f3.shape[1]+1))
- else:
- tt = fftpack.idst(f3*2, type=1, axis=0)/(2*(f3.shape[0]+1))
- del(f3)
- if tt.shape[1] == 1:
- img_tt = np.transpose(fftpack.idst(np.transpose(tt)*2, type=1)/(2*(tt.shape[0]+1)))
- else:
- img_tt = np.transpose(fftpack.idst(np.transpose(tt)*2, type=1, axis=0)/(2*(tt.shape[1]+1)))
- del(tt)
-
- # put solution in inner points; outer points obtained from boundary image
- img_direct = boundary_image
- img_direct[1:-1, 1:-1] = 0
- img_direct[1:-1, 1:-1] = img_tt
- return img_direct
-
-
-"""
-Created on Thu Jan 18 15:36:32 2018
-@author: italo
-https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
-"""
-
-"""
-Syntax
-h = fspecial(type)
-h = fspecial('average',hsize)
-h = fspecial('disk',radius)
-h = fspecial('gaussian',hsize,sigma)
-h = fspecial('laplacian',alpha)
-h = fspecial('log',hsize,sigma)
-h = fspecial('motion',len,theta)
-h = fspecial('prewitt')
-h = fspecial('sobel')
-"""
-
-
-def fspecial_average(hsize=3):
- """Smoothing filter"""
- return np.ones((hsize, hsize))/hsize**2
-
-
-def fspecial_disk(radius):
- """Disk filter"""
- # raise(NotImplemented)
- # rad = 0.6
- rad = radius
- crad = np.ceil(rad-0.5)
- [x, y] = np.meshgrid(np.arange(-crad, crad+1), np.arange(-crad, crad+1))
- maxxy = np.zeros(x.shape)
- maxxy[abs(x) >= abs(y)] = abs(x)[abs(x) >= abs(y)]
- maxxy[abs(y) >= abs(x)] = abs(y)[abs(y) >= abs(x)]
- minxy = np.zeros(x.shape)
- minxy[abs(x) <= abs(y)] = abs(x)[abs(x) <= abs(y)]
- minxy[abs(y) <= abs(x)] = abs(y)[abs(y) <= abs(x)]
- m1 = (rad**2 < (maxxy+0.5)**2 + (minxy-0.5)**2)*(minxy-0.5) +\
- (rad**2 >= (maxxy+0.5)**2 + (minxy-0.5)**2)*\
- np.sqrt((rad**2 + 0j) - (maxxy + 0.5)**2)
- m2 = (rad**2 > (maxxy-0.5)**2 + (minxy+0.5)**2)*(minxy+0.5) +\
- (rad**2 <= (maxxy-0.5)**2 + (minxy+0.5)**2)*\
- np.sqrt((rad**2 + 0j) - (maxxy - 0.5)**2)
- # sgrid = (rad**2 * (0.5*(asin(m2/rad) - asin(m1/rad)) + 0.25*(sin(2*asin(m2/rad)) - )
-    # The remaining sgrid computation from MATLAB's fspecial('disk') was never
-    # ported here, so fail loudly instead of silently returning None.
-    raise NotImplementedError("fspecial_disk is not fully implemented")
-
-
-def fspecial_gaussian(hsize, sigma):
- hsize = [hsize, hsize]
- siz = [(hsize[0]-1.0)/2.0, (hsize[1]-1.0)/2.0]
- std = sigma
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1]+1), np.arange(-siz[0], siz[0]+1))
- arg = -(x*x + y*y)/(2*std*std)
- h = np.exp(arg)
-    h[h < np.finfo(float).eps * h.max()] = 0
- sumh = h.sum()
- if sumh != 0:
- h = h/sumh
- return h
-
-
-
-def fspecial(filter_type, *args, **kwargs):
- '''
- python code from:
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
- '''
- if filter_type == 'average':
- return fspecial_average(*args, **kwargs)
- if filter_type == 'disk':
- return fspecial_disk(*args, **kwargs)
- if filter_type == 'gaussian':
-        return fspecial_gaussian(*args, **kwargs)
-    raise ValueError("fspecial: unsupported filter type '%s'" % filter_type)
-
-
-def traditional_blur(img, filter_type):
-    ksize = int(np.random.choice([3, 5]))  # random odd kernel size
-    # ksize = 3
-    if filter_type == 'average':
-        # note: despite the name, this branch applies a median blur
-        img = np.array(img, np.float32)
-        blur_img = cv2.medianBlur(img, ksize=ksize)
-        return blur_img
-    if filter_type == 'gaussian':
-        blur_img = cv2.GaussianBlur(img, ksize=(ksize, ksize), sigmaX=0, sigmaY=0)
-        return blur_img
-
-def motion_blur(img, angle=45):
-    degree = int(np.random.randint(1, 8))  # blur strength; 8 is strong enough, light/heavy picked at random
-    image = np.array(img)
-
-    # build a motion-blur kernel at an arbitrary angle; larger degree means stronger blur
-    M = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)
-    motion_blur_kernel = np.diag(np.ones(degree))
-    motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, (degree, degree))
-
-    motion_blur_kernel = motion_blur_kernel / degree
-    blurred = cv2.filter2D(image, -1, motion_blur_kernel)
-
-    # rescale to [0, 255], then normalize to roughly [-1, 1]
-    cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
-    blurred = (blurred-127.5)/128
- return blurred
-
-
-if __name__ == '__main__':
- a = opt_fft_size([111])
- print(a)
-
- print(fspecial('gaussian', 5, 1))
-
--
Gitee
From d0244153372cb6141624ff3e6fd8d745deda7ba4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:05:36 +0000
Subject: [PATCH 12/26] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?=
=?UTF-8?q?nsorFlow/contrib/cv/SRFBN=5Ffor=5FTensorFlow/psnr=5Fssim.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../cv/SRFBN_for_TensorFlow/psnr_ssim.py | 86 -------------------
1 file changed, 86 deletions(-)
delete mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py
deleted file mode 100644
index c27924230..000000000
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/psnr_ssim.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import numpy as np
-import cv2
-
-# PSNR for grayscale images
-def compare_psnr_gray(im1,im2,maxI=255):
-    im1=im1.astype(np.float64)
-    im2=im2.astype(np.float64)
-    diff=im1-im2
-    mse=np.mean(np.square(diff))
-    if mse==0: return float('inf')
-    return 10*np.log10(maxI*maxI/mse)
-
-# PSNR for color images
-def compare_psnr_rgb(im1,im2,maxI=255):
-    blue1,green1,red1=cv2.split(im1)
-    blue2,green2,red2=cv2.split(im2)
-    psnr_blue=compare_psnr_gray(blue1,blue2,maxI)
-    psnr_green=compare_psnr_gray(green1,green2,maxI)
-    psnr_red=compare_psnr_gray(red1,red2,maxI)
-
-    # average over the three channels
-    return (psnr_blue+psnr_green+psnr_red)/3
-
-# usable directly (but grayscale and color images cannot be compared with each other)
-def compare_psnr(im1,im2,maxI=255):
-    # raise if the two images differ in size or are not of the same kind (grayscale/color)
-    if im1.shape!=im2.shape: raise ValueError("error: image dimensions differ")
-    if (im1.ndim==2) and (im2.ndim==2): return compare_psnr_gray(im1,im2)
-    # a color image may still have a single channel
-    elif (im1.ndim==3) and (im2.ndim==3):
-        if im1.shape[2]==3:
-            return compare_psnr_rgb(im1,im2)
-        elif im1.shape[2]==1:
-            return compare_psnr_gray(np.squeeze(im1),np.squeeze(im2))
-    else: raise ValueError("error: invalid image dimensions")
-
-# SSIM
-def ssim(im1,im2,maxI=255):
-    # 0.01 and 0.03 are fixed constants from the SSIM definition; do not change
-    c1=(0.01*maxI)**2
-    c2=(0.03*maxI)**2
-
-    # convert to float64
-    im1=im1.astype(np.float64)
-    im2=im2.astype(np.float64)
-    # Gaussian kernel; 11 and 1.5 are likewise fixed, do not change
-    kernel=cv2.getGaussianKernel(11,1.5)
-    window=np.outer(kernel,kernel.transpose())
-
-    # convolutions
-    # SSIM splits the image into windows and computes each term of the formula by filtering
-    mu1=cv2.filter2D(im1,-1,window)[5:-5,5:-5]
-    mu2=cv2.filter2D(im2,-1,window)[5:-5,5:-5]
-    mu1_sq=mu1**2
-    mu2_sq=mu2**2
-    mu1_mu2=mu1*mu2
-    sigma1_sq=cv2.filter2D(im1**2,-1,window)[5:-5,5:-5]-mu1_sq
-    sigma2_sq=cv2.filter2D(im2**2,-1,window)[5:-5,5:-5]-mu2_sq
-    sigma12=cv2.filter2D(im1*im2,-1,window)[5:-5,5:-5]-mu1_mu2
-
-    # the SSIM formula
-    ssim_map=((2*mu1_mu2+c1)*(2*sigma12+c2))/((mu1_sq+mu2_sq+c1)*(sigma1_sq+sigma2_sq+c2))
-    # average over all windows
-    return ssim_map.mean()
-
-# usable directly
-def compare_ssim(im1,im2,maxI=255):
-    # raise if the two images differ in size or are not of the same kind (grayscale/color)
-    if im1.shape!=im2.shape:
-        raise ValueError("error: image dimensions differ")
-    if im1.ndim==2:
-        return ssim(im1,im2)
-    # a color image may still have a single channel
-    elif im1.ndim==3:
-        if im1.shape[2]==3:
-            blue1,green1,red1=cv2.split(im1)
-            blue2,green2,red2=cv2.split(im2)
-            ssim_blue=ssim(blue1,blue2)
-            ssim_green=ssim(green1,green2)
-            ssim_red=ssim(red1,red2)
-
-            # as with PSNR, take the average over channels
-            return (ssim_blue+ssim_green+ssim_red)/3
-        elif im1.shape[2]==1:
-            return ssim(np.squeeze(im1),np.squeeze(im2))
-    else: raise ValueError("error: invalid image dimensions")
\ No newline at end of file
--
Gitee
From e462aac70a762a8e0db7e91d6a47a907f60960de Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:07:36 +0000
Subject: [PATCH 13/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
index 79c405514..c322a7d11 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
@@ -94,15 +94,13 @@ SRFBN is a network model that uses feedback connections to improve super-resolution reconstruction quality
 ├── README.md //project documentation
 ├── config.py //model configuration
 ├── PreProcess.py //data preprocessing
-├── psnr_ssim.py //image quality (PSNR/SSIM) evaluation
 ├── requirements.txt //Python dependencies for training
 ├── SRFBN_model.py //SRFBN network model
 ├── test.py //test code
-├── traditional_blur.py //image blurring utilities
 ├── train.py //training code
 ├── test
 │ ├──train_performance_1p.sh //single-card performance-check launch script
-
+│ ├──train_full_1p.sh //single-card full-training launch script
```
## 脚本参数
--
Gitee
From 777f0cf88bb72295a0b0e7fb1cfc598c9ecf97b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:11:40 +0000
Subject: [PATCH 14/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/PreProcess.py | 22 +++++++++----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
index 0d13c7cac..2d34ac1ab 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
@@ -33,17 +33,17 @@ import random
#from skimage import util
#给图像数据添加噪声
-#def add_noise(img):
- # mode_types = ['gaussian', 'localvar', 'poisson', 'speckle'] # 'salt', 'pepper', 's&p'这三个噪声太假了
- # inx = int(np.random.choice(np.arange(len(mode_types)), 1))
- # # inx = 0
- # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)#转换色彩空间为RGB
- # mean = random.random() * 0.001 # + 0.001#random.random()生成0到1之间的随机数
- # var = random.random() * 0.002 # + 0.01
- # noise_img = util.random_noise(img.copy(), mode=mode_types[inx],
- # mean=mean,
- # var=var)#添加噪声
- # return noise_img
+def add_noise(img):
+ mode_types = ['gaussian', 'localvar', 'poisson', 'speckle'] # 'salt', 'pepper', 's&p'这三个噪声太假了
+ inx = int(np.random.choice(np.arange(len(mode_types)), 1))
+ # inx = 0
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)#转换色彩空间为RGB
+ mean = random.random() * 0.001 # + 0.001#random.random()生成0到1之间的随机数
+ var = random.random() * 0.002 # + 0.01
+ noise_img = util.random_noise(img.copy(), mode=mode_types[inx],
+ mean=mean,
+ var=var)#添加噪声
+ return noise_img
#数据扩充或增强
def augment_data(img_patch, flip, rot): # img_patchs : n,h,w,c
--
Gitee
From 85a53505fcb7366fec6ae7a469bb4516b54fba99 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 04:11:57 +0000
Subject: [PATCH 15/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
index 2d34ac1ab..6afecec2f 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
@@ -30,7 +30,7 @@ from npu_bridge.npu_init import *
import cv2
import numpy as np
import random
-#from skimage import util
+from skimage import util
#给图像数据添加噪声
def add_noise(img):
--
Gitee
From 05a73ba80be4154c1b9b60075452e7c80488c4db Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 16:49:45 +0000
Subject: [PATCH 16/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/Basic_Model.py | 56 +++++++++----------
1 file changed, 27 insertions(+), 29 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
index 3051156e8..700d4e99d 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/Basic_Model.py
@@ -32,39 +32,37 @@ from tensorflow.contrib import layers
import sys
class basic_network(object):
- #初始化网络配置
def __init__(self, cfg):
self.training=True
self.cfg = cfg
- self.params_count = 0#参数数量
- #初始化参数
+        self.params_count = 0 # total parameter count
def init_params(self, *args, **kwargs):
def _variable_on_cpu(w_shape, b_shape, weight_decay=0.99, use_bias=True, name="conv"):
- with tf.device('/cpu:0'):#使用cpu
- w = tf.Variable(tf.truncated_normal(w_shape, 0.0, 0.001), trainable=True, name="%s_w" % name)#权重
- tf.add_to_collection(name="weights_l2_loss", value=self.calc_l1_loss(w, weight_decay))#将value加入name为'weights_l2_loss'的列表中
- b = tf.Variable(tf.zeros(b_shape), trainable=use_bias, name="%s_b" % name)#偏置
- return w, b #返回参数w和b
- kernel_size = kwargs["kernel_size"]#kwargs为字典
+ with tf.device('/cpu:0'):
+ w = tf.Variable(tf.truncated_normal(w_shape, 0.0, 0.001), trainable=True, name="%s_w" % name)
+ tf.add_to_collection(name="weights_l2_loss", value=self.calc_l1_loss(w, weight_decay))
+ b = tf.Variable(tf.zeros(b_shape), trainable=use_bias, name="%s_b" % name)
+ return w, b
+ kernel_size = kwargs["kernel_size"]
in_channels = kwargs["in_channels"]
out_channels = kwargs["out_channels"]
# weight_decay = kwargs["weight_decay"]
- w_shape = [kernel_size, kernel_size, in_channels, out_channels]#权重的size
- b_shape = [out_channels]#b的size
+ w_shape = [kernel_size, kernel_size, in_channels, out_channels]
+ b_shape = [out_channels]
name = kwargs["name"]
self.params_count += kernel_size*kernel_size*in_channels*out_channels
- self.params_count += out_channels#参数的数量
- return _variable_on_cpu(w_shape, b_shape, use_bias=kwargs["use_bias"], name=name)#返回初始化后的w和b
- #计算代价函数,L1和L2
+ self.params_count += out_channels
+ return _variable_on_cpu(w_shape, b_shape, use_bias=kwargs["use_bias"], name=name)
+
def calc_loss(self, *args, **kwargs):
loss_type = kwargs["loss_type"]
x = kwargs["x"]
y = kwargs["y"]
- if loss_type == "L1":#代价函数类型
+ if loss_type == "L1":
return tf.reduce_sum(tf.abs(x-y), name="L1_loss")
elif loss_type == "L2":
return tf.nn.l2_loss((x-y), name="L2_loss")
- #激活函数
+
def activation(self, *args, **kwargs):
act_type = kwargs["act_type"]
act_type = act_type.lower()
@@ -80,31 +78,31 @@ class basic_network(object):
return tf.nn.tanh(args[0])
else:
return args[0]
- #计算L2型代价函数
+
def calc_l2_loss(self, weight, weight_decay):
- _, _, _, outchannel = weight.get_shape().as_list()#这里只需要获取输出的channel数
+ _, _, _, outchannel = weight.get_shape().as_list()
return (weight_decay) * tf.reduce_sum(tf.square(weight)) / outchannel
- #计算L1型代价函数
+
def calc_l1_loss(self, weight, weight_decay):
_, _, _, outchannel = weight.get_shape().as_list()
return (weight_decay)*tf.reduce_sum(tf.abs(weight)) / outchannel
- #批归一化处理
+
def batch_norm(self, *args, **kwargs):
- return tf.layers.batch_normalization(args[0], training=kwargs["training"])#第一个参数是输入
- #归一化的一种方法
+ return tf.layers.batch_normalization(args[0], training=kwargs["training"])
+
def instance_norm(self, *args, **kwargs):
return layers.instance_norm(args[0], kwargs["name"])
- #激活函数的一种
+
def hard_sigmoid(self, x):
return tf.nn.relu6((x+3)/6)
def hard_swish(self, x):
return x * self.hard_sigmoid(x)
- #平均池化
+
def global_average_pooling(self, x, name="GAP"):
         return tf.reduce_mean(x, axis=[1, 2], keep_dims=True, name="Global_Average_Pooling_%s" % name) # keep_dims=True: no rank reduction
- #定义卷积块
+
def ConvBlock(self,x, in_channels, out_channels, kernel_size, stride=1, name="ConvBlock",
BN=True, use_bias=True, padding="VALID", act_type="relu", mode="CNA"):
@@ -112,7 +110,7 @@ class basic_network(object):
         assert (mode in ['CNA', 'NAC']), '[ERROR] Wrong mode in [%s]!' % sys.modules[__name__] # assert valid mode
weight, bias = self.init_params(kernel_size=kernel_size, in_channels=in_channels,
out_channels=out_channels, use_bias=use_bias, name=name)
- if mode == "CNA":#先卷积再归一化再激活
+ if mode == "CNA":
x = tf.nn.conv2d(x, filter=weight, strides=[1, stride, stride, 1], padding=padding)
x = tf.nn.bias_add(x, bias)
if BN:
@@ -136,15 +134,15 @@ class basic_network(object):
x = tf.nn.conv2d(x, filter=weight, strides=[1, stride, stride, 1], padding=padding)
x = tf.nn.bias_add(x, bias)
return x
- #反卷积块(上采样upsampling)
+
def DeConvBlock(self, x, in_channels, out_channels, kernel_size, stride=1, name="DeConvBlock",
BN=True, use_bias=True, padding="VALID", act_type="relu", mode="CNA"):
assert (mode in ['CNA', 'NAC']), '[ERROR] Wrong mode in [%s]!' % sys.modules[__name__]
- b, h, w, c = x.get_shape().as_list()#h为图像高度,w为宽度,b为batch,即一次处理的样本数,c为颜色通道数
+ b, h, w, c = x.get_shape().as_list()
out_shape = [b, h * self.cfg.scale, w * self.cfg.scale, out_channels]
weight, bias = self.init_params(kernel_size=kernel_size, in_channels=out_channels,
out_channels=in_channels, use_bias=use_bias, name=name)
- if mode == "CNA":#反卷积函数(扩大图像尺寸)(其实也是卷积)
+ if mode == "CNA":
x = tf.nn.conv2d_transpose(x, filter=weight, output_shape=out_shape,
strides=[1, stride, stride, 1], padding=padding)
x = tf.nn.bias_add(x, bias)
--
Gitee
From 5ebe5d786814f2380ba1717d91d7d184d449119b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 16:52:53 +0000
Subject: [PATCH 17/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/PreProcess.py | 28 +++++++++----------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
index 6afecec2f..5ed011cdb 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/PreProcess.py
@@ -32,38 +32,38 @@ import numpy as np
import random
from skimage import util
-#给图像数据添加噪声
+
def add_noise(img):
- mode_types = ['gaussian', 'localvar', 'poisson', 'speckle'] # 'salt', 'pepper', 's&p'这三个噪声太假了
+    mode_types = ['gaussian', 'localvar', 'poisson', 'speckle']  # 'salt', 'pepper' and 's&p' look too artificial
inx = int(np.random.choice(np.arange(len(mode_types)), 1))
- # inx = 0
- img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)#转换色彩空间为RGB
- mean = random.random() * 0.001 # + 0.001#random.random()生成0到1之间的随机数
+ inx = 0
+ img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)
+ mean = random.random() * 0.001 # + 0.001#random.random() generates number between 0 and 1
var = random.random() * 0.002 # + 0.01
noise_img = util.random_noise(img.copy(), mode=mode_types[inx],
mean=mean,
- var=var)#添加噪声
+ var=var)
return noise_img
-#数据扩充或增强
+
def augment_data(img_patch, flip, rot): # img_patchs : n,h,w,c
if flip==1:
- img_patch = img_patch[:, ::-1, :] # hflip#水平翻转
+ img_patch = img_patch[:, ::-1, :] # hflip
elif flip==2:
- img_patch = img_patch[::-1, :, :] # vflip#垂直翻转
+ img_patch = img_patch[::-1, :, :] # vflip
if rot==1:
- img_patch = cv2.rotate(img_patch, cv2.ROTATE_90_CLOCKWISE)#顺时针旋转90
+ img_patch = cv2.rotate(img_patch, cv2.ROTATE_90_CLOCKWISE)
elif rot==2:
img_patch = cv2.rotate(img_patch, cv2.ROTATE_90_COUNTERCLOCKWISE)
return img_patch
-#预处理数据
+
def preprocess(imgs, cfg):
LR_patchs, HR_patchs = [], []
for img in imgs:
- HR = cv2.imread(img.strip(), cv2.IMREAD_COLOR)#读取图片路径,并以RGB模式
- HR = (HR - 127.5) / 128#归一化
- h, w, c = HR.shape#高度,宽度,颜色通道数
+ HR = cv2.imread(img.strip(), cv2.IMREAD_COLOR)
+ HR = (HR - 127.5) / 128
+ h, w, c = HR.shape
x_stride = w // (cfg.imagesize * cfg.scale)
y_stride = h // (cfg.imagesize * cfg.scale)
--
Gitee
From e9a93bb612a6a6e80feeb91fb92e85d7dfe76810 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 16:55:28 +0000
Subject: [PATCH 18/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
index 1ce90c923..4afaf9bb5 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/SRFBN_model.py
@@ -43,7 +43,7 @@ class SRFBN(basic_network):
self.last_hidden = None
self.should_reset = True
self.outs = []
- #FB块
+    # FB block that propagates feedback information
def FeedBackBlock(self, x, num_features, num_groups, act_type, name="FBB"):
if self.cfg.scale == 1:
stride = 1
@@ -110,7 +110,7 @@ class SRFBN(basic_network):
stride = 4
padding = "SAME"
kernel_size = 8
- # x = self.sub_mean(self.imageplaceholder) # 暂且当作归一化
+ # x = self.sub_mean(self.imageplaceholder) # normalize
_, height, width, _ = self.imageplaceholder.get_shape().as_list()
@@ -136,7 +136,7 @@ class SRFBN(basic_network):
# t = t + inter_res
# t = self.add_mean(t)
self.outs.append(t)
- #训练步骤
+
def train_step(self):
self.build()
print("This Net has Params num is %f MB" % (self.params_count * 4 / 1024 / 1024)) # float32
@@ -156,7 +156,7 @@ class SRFBN(basic_network):
self.merged_summary = tf.summary.merge_all()
self.saver = tf.train.Saver(max_to_keep=1)
- #加载检查点
+    # load checkpoint
def load(self):
model_name = "SRFBN.model"
model_dir = "%s_%s_%s_%s_c%d_x%s" % (
@@ -173,7 +173,7 @@ class SRFBN(basic_network):
print("\nCheckpoint Loading Failed! \n")
return step
- #保存当前模型
+ #save model
def save(self, step):
model_name = "SRFBN.model"
model_dir = "%s_%s_%s_%s_c%d_x%s" % \
@@ -187,7 +187,7 @@ class SRFBN(basic_network):
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
- #test
+ #test
def test(self, width, height):
self.cfg.batchsize = 1
testshape = [self.cfg.batchsize, height, width, self.cfg.c_dim]
--
Gitee
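The load()/save() methods annotated in this patch follow the stock tf.train.Saver checkpoint pattern. A generic TF1-style sketch of that pattern (not the repository's exact code; the "./model" directory and the step-parsing convention are assumptions based on the model_name shown above):

    import os
    import tensorflow as tf

    CKPT_DIR = "./model"  # assumed to mirror cfg.checkpoint_dir

    def save_model(sess, saver, step):
        # writes SRFBN.model-<step>.* plus an updated 'checkpoint' index file
        os.makedirs(CKPT_DIR, exist_ok=True)
        saver.save(sess, os.path.join(CKPT_DIR, "SRFBN.model"), global_step=step)

    def load_model(sess, saver):
        ckpt = tf.train.get_checkpoint_state(CKPT_DIR)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # recover the step from a checkpoint name like SRFBN.model-1000
            return int(ckpt.model_checkpoint_path.split("-")[-1])
        print("\nCheckpoint Loading Failed! \n")
        return 0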
From 271fa9374abac531090ce2e9b5896f690e912310 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 16:58:52 +0000
Subject: [PATCH 19/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../contrib/cv/SRFBN_for_TensorFlow/config.py | 36 +++++++++----------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
index 4da2aec8f..391bbadcc 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/config.py
@@ -31,15 +31,15 @@ import os
class config:
def __init__(self):
- self.batchsize = 1#number of samples processed at a time
- self.Process_num = 3 #number of processes
- self.maxsize = 200 #maximum size
- self.ngpu = 1 #number of GPUs
- self.imagesize = 64#image size
- self.scale = 3#scale factor
- self.epoch = 1000#number of epochs
- #create checkpoint, log, and result directories
- self.checkpoint_dir = "./model"#checkpoint directory
+ self.batchsize = 1
+ self.Process_num = 3
+ self.maxsize = 200
+ self.ngpu = 1
+ self.imagesize = 64
+ self.scale = 3
+ self.epoch = 1000
+ #create checkpoint, log, and result dirs
+ self.checkpoint_dir = "./model"
if not os.path.exists(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
self.log_dir = "./log"
@@ -54,24 +54,24 @@ class config:
class SRFBN_config(config):
def __init__(self):
super(SRFBN_config, self).__init__()
- self.istrain = True#training or testing
+ self.istrain = True#whether training or testing
self.istest = not self.istrain
- self.c_dim = 3 #color channels; can train on grayscale or on RGB images
+ self.c_dim = 3 #color channels; supports grayscale as well as RGB images
self.in_channels = 3
self.out_channels = 3
self.num_features = 32#base number of filter
- self.num_steps = 4#time steps
- self.num_groups = 6#number of projection groups in the feedback block (FBB)
- self.BN = True#
+ self.num_steps = 4# time steps
+ self.num_groups = 6#number of projection groups in the feedback block (FBB)
+ self.BN = True
if self.BN:
self.BN_type = "BN" # "BN" # or "IN"
self.act_type = "prelu" #activation function
self.loss_type = "L2"
- self.lr_steps = [150, 300, 550, 750]#epoch milestones for lr decay
- self.lr_gama = 1#decay parameter
- self.learning_rate = 2e-7#learning rate
+ self.lr_steps = [150, 300, 550, 750]#epoch milestones for lr decay
+ self.lr_gama = 1
+ self.learning_rate = 2e-7#learning rate
self.load_premodel = True
- #create directories
+ #create dir
self.srfbn_logdir = "%s/srfbn" % self.log_dir
if not os.path.exists(self.srfbn_logdir):
os.mkdir(self.srfbn_logdir)
--
Gitee
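One consequence of the values in this patch: train.py builds its learning-rate schedule as learning_rate * lr_gama**i per stage, so with lr_gama = 1 every stage collapses to the same rate and lr_steps has no effect. A quick check of that arithmetic:

    lr_steps = [150, 300, 550, 750]
    lr_gama = 1
    learning_rate = 2e-7
    values = [learning_rate * (lr_gama ** i) for i in range(len(lr_steps) + 1)]
    print(values)  # [2e-07, 2e-07, 2e-07, 2e-07, 2e-07] - constant; a gama < 1 would decay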
From 579bd4ef7bed2567aa55ffa0f9df139b6f7b7f05 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 17:03:02 +0000
Subject: [PATCH 20/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
index 58e3a8272..46f8808e3 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/train.py
@@ -43,7 +43,7 @@ def train_SRFBN(dataset, sess, cfg):
srfbn.train_step()
out = tf.add_n(srfbn.outs) / srfbn.cfg.num_steps
## build Optimizer
- #make the learning rate different at different training stages
+ #use a different learning rate at different training stages
boundaries = [len(dataset)*epoch//cfg.batchsize for epoch in cfg.lr_steps]
values = [cfg.learning_rate*(cfg.lr_gama**i) for i in range(len(cfg.lr_steps)+1)]
lr = tf.train.piecewise_constant(step, boundaries, values)
@@ -57,7 +57,7 @@ def train_SRFBN(dataset, sess, cfg):
tf.global_variables_initializer().run(session=sess)
summary_writer = tf.summary.FileWriter(cfg.srfbn_logdir, srfbn.sess.graph)
- #load the model
+ #load model
if srfbn.cfg.load_premodel:
counter = srfbn.load()
else:
@@ -67,10 +67,10 @@ def train_SRFBN(dataset, sess, cfg):
global_step = 0
for ep in range(cfg.epoch):
- #the order in which images are picked is randomized each epoch
+ #pick images in random order
pic_idx = np.random.permutation(len(dataset))
picid = 0
- #load five images each time
+ #load five images at a time
for i in range(0,len(dataset),5):
index = []
for j in range(5):
@@ -82,7 +82,7 @@ def train_SRFBN(dataset, sess, cfg):
print(imgnames)
batch_labels, batch_images = preprocess(imgnames, cfg)
patch_idx = list(range(len(batch_labels)))
- #make the number of image patches exactly divisible by batchsize
+ #pad so the number of patches is divisible by batchsize
if len(patch_idx) % cfg.batchsize != 0:
patch_idx.extend(list(np.random.choice(patch_idx,
cfg.batchsize * ((len(patch_idx) // cfg.batchsize)+1) - len(patch_idx))))
@@ -126,7 +126,7 @@ def train_SRFBN(dataset, sess, cfg):
global_step += 1
counter += 1
-#train
+#train
def train(*args, **kwargs):
data_dir = kwargs["data_dir"]
imgs = [os.path.join(data_dir,data) for data in os.listdir(data_dir)]
--
Gitee
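The boundaries/values pair in this patch feeds tf.train.piecewise_constant, which switches the learning rate once the global step crosses each epoch-derived boundary (len(dataset) * epoch // batchsize). A self-contained TF1 sketch of the mechanism; the dataset size of 800 and gama of 0.5 are made-up numbers chosen so the decay is visible:

    import tensorflow as tf

    step = tf.Variable(0, trainable=False, dtype=tf.int32)
    boundaries = [800 * e // 1 for e in [150, 300]]  # epoch milestones -> [120000, 240000]
    values = [2e-7 * (0.5 ** i) for i in range(3)]   # one more value than boundaries
    lr = tf.train.piecewise_constant(step, boundaries, values)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for s in (0, 120001, 240001):
            sess.run(step.assign(s))
            print(sess.run(lr))  # 2e-07, then 1e-07, then 5e-08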
From 89fb08fcabe32ab716f751af4b9b18ad91b5b7f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 17:05:08 +0000
Subject: [PATCH 21/26] Add single-card full training script
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../test/train_full_1p.sh | 184 ++++++++++++++++++
1 file changed, 184 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_full_1p.sh
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_full_1p.sh b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_full_1p.sh
new file mode 100644
index 000000000..1242fb044
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_full_1p.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+
+##########################################################
+#########Lines 3 through 100: please do NOT modify##########
+#########Lines 3 through 100: please do NOT modify##########
+#########Lines 3 through 100: please do NOT modify##########
+##########################################################
+# path where this shell script resides
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# check whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# name of the network currently being run
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# initialize path parameters
+data_path=""
+output_path=""
+
+# help message; no need to modify
+if [[ $1 == --help || $1 == -h ]];then
+ echo "usage:./train_performance_1P.sh "
+ echo " "
+ echo "parameter explain:
+ --data_path # dataset of training
+ --output_path # output of training
+ --train_steps # max_step for training
+ --train_epochs # max_epoch for training
+ --batch_size # batch size
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# parameter validation; no need to modify
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --output_path* ]];then
+ output_path=`echo ${para#*=}`
+ elif [[ $para == --train_steps* ]];then
+ train_steps=`echo ${para#*=}`
+ elif [[ $para == --train_epochs* ]];then
+ train_epochs=`echo ${para#*=}`
+ elif [[ $para == --batch_size* ]];then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# check that data_path was passed in; no need to modify
+if [[ $data_path == "" ]];then
+ echo "[Error] para \"data_path\" must be config"
+ exit 1
+fi
+
+# check that output_path was passed in; no need to modify
+if [[ $output_path == "" ]];then
+ output_path="./output/${ASCEND_DEVICE_ID}"
+fi
+
+# set the console log file name; please keep this; the file name is ${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=`cat /etc/passwd |grep ma-user`
+if [ x"${modelarts_flag}" != x ];
+then
+ echo "running with modelarts_flag..."
+ print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+ print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
+CaseName=""
+function get_casename()
+{
+ if [ x"${perf_flag}" = x1 ];
+ then
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+ else
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+ fi
+}
+
+# change into the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# record the training start time; no need to modify
+start_time=$(date +%s)
+##########################################################
+#########Lines 3 through 100: please do NOT modify##########
+#########Lines 3 through 100: please do NOT modify##########
+#########Lines 3 through 100: please do NOT modify##########
+##########################################################
+
+#=========================================================
+#=========================================================
+#========training command: adapt it to your own network=====
+#=========================================================
+#=========================================================
+# basic parameters; review and modify for your model
+# your training dataset is under ${data_path}; read it via this variable
+# your training output directory is ${output_path}; write to it via this variable
+# other basic parameters may be added as needed, but keep batch_size and set it to the correct value
+batch_size=1
+
+if [ x"${modelarts_flag}" != x ];
+then
+ python3 ./train.py
+else
+ python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} 1>${print_log} 2>&1
+fi
+
+# performance metrics computation
+StepTime=`((cat ${print_log} | grep "time" | head -n 1) && (cat ${print_log} | grep "time" | tail -n 1)) | awk -F ':' '{print $5 $6 }' | awk -F ',' '{print $1 $2}' | awk -F ' ' '{print $1;print $3}' | awk '{if (NR == 1){a=$1} else if (NR == 2){b=$1} else if (NR == 3){c=$1} else if (NR == 4){d=$1}} END {print (d-b)/(c-a)}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
+#PSNR computation
+PSNR=`cat ${print_log} | grep "time" | tail -n 10 | awk -F ',' '{print $8}' | awk -F ':' '{sum+=$2} END {print sum/NR}'`
+# extract all loss log lines
+grep "loss:" ${print_log} | awk -F "," '{print $6}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+
+###########################################################
+#########do not modify anything below this point###########
+#########do not modify anything below this point###########
+#########do not modify anything below this point###########
+###########################################################
+
+# check whether this run correctly used the Ascend NPU
+tf_flag=`echo ${Network} | grep TensorFlow | wc -l`
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 -a x"${tf_flag}" == x1 ];
+then
+ echo "------------------ ERROR NOTICE START ------------------"
+ echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration."
+ echo "------------------ ERROR NOTICE END------------------"
+else
+ echo "------------------ INFO NOTICE START------------------"
+ echo "INFO, your task have used Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------"
+fi
+
+# get the final casename; please keep this; the case file name is ${CaseName}
+get_casename
+
+# rename the loss file
+if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ];
+then
+ mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt
+fi
+
+# end-to-end training time
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+echo "------------------ Final result ------------------"
+# print performance: FPS / per-step time / end-to-end time
+echo "Final Performance images/sec : $FPS"
+echo "Final Performance sec/step : $StepTime"
+echo "E2E Training Duration sec : $e2e_time"
+echo "PSNR : $PSNR"
+# print training accuracy
+#echo "Final Train Accuracy : ${train_accuracy}"
+
+# loss of the last iteration, read from the renamed loss file; no need to modify
+ActualLoss=(`awk 'END {print $NF}' ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt`)
+
+#print key information into ${CaseName}.log; no need to modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+#echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
--
Gitee
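The PSNR line in this script averages the last ten "time" log lines with awk, and the field indices are tied to train.py's exact print format. A rough Python equivalent of that tail-average, assuming a hypothetical "PSNR:[<value>]" token appears in each matching line:

    import re

    def average_tail_psnr(log_path, last_n=10):
        psnrs = []
        with open(log_path) as f:
            for line in f:
                m = re.search(r"PSNR\s*:?\s*\[?([0-9.]+)", line)
                if m:
                    psnrs.append(float(m.group(1)))
        tail = psnrs[-last_n:]
        return sum(tail) / len(tail) if tail else float("nan")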
From 658e31da64084342affe906a73daeb0d39fd5d28 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 17:07:29 +0000
Subject: [PATCH 22/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh
index 5882b5f4f..708a9d41c 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test/train_performance_1p.sh
@@ -100,8 +100,7 @@ start_time=$(date +%s)
train_epochs=1
batch_size=1
print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
-python3 ./train.py > ${print_log}
-# performance metrics computation
+python3.7 ./train.py --data_path=${data_path} --output_path=${output_path} 1>${print_log} 2>&1
StepTime=`((cat ${print_log} | grep "time" | head -n 1) && (cat ${print_log} | grep "time" | tail -n 1)) | awk -F ':' '{print $5 $6 }' | awk -F ',' '{print $1 $2}' | awk -F ' ' '{print $1;print $3}' | awk '{if (NR == 1){a=$1} else if (NR == 2){b=$1} else if (NR == 3){c=$1} else if (NR == 4){d=$1}} END {print (d-b)/(c-a)}'`
FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
#PSNR computation
--
Gitee
From 17f28971c5f881303051330cd3c39416d014d470 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 17:09:31 +0000
Subject: [PATCH 23/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
index 66419630c..4d1161511 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
@@ -56,7 +56,7 @@ def test_SRFBN(image_lr,image_hr):
srfbn.outs]
srfbn.losses = tf.reduce_sum(srfbn.losses) / len(srfbn.losses) / srfbn.cfg.batchsize + srfbn.l2_regularization_loss
load_flag += 1
- cv2.namedWindow("result", 0)
+ #cv2.namedWindow("result", 0)
img_hr = image_hr.reshape([1,height*srfbn.cfg.scale,width*srfbn.cfg.scale,3])
img_lr = image_lr.reshape([1, height, width, 3])
@@ -66,8 +66,8 @@ def test_SRFBN(image_lr,image_hr):
PSNR = compare_psnr(output, img_hr, data_range=255)
ssim = _structural_similarity.structural_similarity(output, img_hr,win_size=11, data_range=255, multichannel=True)
print("loss:[%.8f], l2_loss:[%.8f], PSNR:[%.8f], SSIM:[%.8f]"%(err,l2_loss,PSNR,ssim))
- cv2.imshow("result", np.uint8(output))
- cv2.waitKey(0)
+ #cv2.imshow("result", np.uint8(output))
+ #cv2.waitKey(0)
--
Gitee
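For reference, the compare_psnr and structural_similarity calls that test.py keeps come from scikit-image; under the current skimage.metrics API (where channel_axis replaces the older multichannel flag) the same computation can be sketched with stand-in arrays:

    import numpy as np
    from skimage.metrics import peak_signal_noise_ratio, structural_similarity

    hr = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in ground truth
    sr = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in network output

    psnr = peak_signal_noise_ratio(hr, sr, data_range=255)
    ssim = structural_similarity(hr, sr, win_size=11, data_range=255, channel_axis=-1)
    print("PSNR:[%.8f], SSIM:[%.8f]" % (psnr, ssim))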
From b0997c0cffbd8cadcfe2ef5262bca033c65b5c3a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Sun, 6 Nov 2022 17:15:24 +0000
Subject: [PATCH 24/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
index 4d1161511..be8f5570f 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/test.py
@@ -41,7 +41,6 @@ def test_SRFBN(image_lr,image_hr):
#image
height, width, _ = image_lr.shape
- print(height,width)
global load_flag
global srfbn
global out
--
Gitee
From 1eac013c1a195ab78d931c9c45c5eba19992c8fe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Mon, 7 Nov 2022 03:45:01 +0000
Subject: [PATCH 25/26] update
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
index c322a7d11..edf8fc968 100644
--- a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/README.md
@@ -23,7 +23,7 @@ SRFBN is a network model that uses feedback connections to improve the quality of reconstructed super-resolution images
```
- Reference paper:
- https://arxiv.org/abs/1903.09814v2
+ https://arxiv.org/pdf/1903.09814.pdf
- Reference implementation:
--
Gitee
From eb9628e7edd96dc6f368adc5d7e4293ae3e99023 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=81=82=E4=BD=B3=E5=A8=81?= <2929019588@qq.com>
Date: Mon, 7 Nov 2022 04:52:50 +0000
Subject: [PATCH 26/26] Training status notes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: 聂佳威 <2929019588@qq.com>
---
.../contrib/cv/SRFBN_for_TensorFlow/modelzoo_level.txt | 6 ++++++
1 file changed, 6 insertions(+)
create mode 100644 TensorFlow/contrib/cv/SRFBN_for_TensorFlow/modelzoo_level.txt
diff --git a/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/modelzoo_level.txt b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/modelzoo_level.txt
new file mode 100644
index 000000000..9981888d4
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRFBN_for_TensorFlow/modelzoo_level.txt
@@ -0,0 +1,6 @@
+GPUStatus:OK
+NPUMigrationStatus:OK
+FuncStatus:OK
+PrecisionStatus:POK
+AutoTune:OK
+PerfStatus:PERFECT
\ No newline at end of file
--
Gitee