From 07f0f32ba80ed28deb1d980638b6d7da43187561 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Thu, 12 May 2022 05:52:43 +0000 Subject: [PATCH 1/9] update TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py. --- .../contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py index 4052bf652..56481f471 100644 --- a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py +++ b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py @@ -53,7 +53,8 @@ def filter_image(image, kernel, mode='valid'): """ Implements color filtering (convolution using a flipped kernel) """ chs = [] for d in range(image.shape[2]): - channel = sig.convolve2d(image[:,:,d], np.flipud(np.fliplr(kernel)), mode=mode) + # channel = sig.convolve2d(image[:,:,d], np.flipud(np.fliplr(kernel)), mode=mode) + channel = sig.fftconvolve(image[:, :, d], np.flipud(np.fliplr(kernel)), mode=mode) chs.append(channel) return np.stack(chs, axis=2) @@ -61,7 +62,8 @@ def convolve_image(image, kernel, mode='valid'): """ Implements color image convolution """ chs = [] for d in range(image.shape[2]): - channel = sig.convolve2d(image[:,:,d], kernel, mode=mode) + # channel = sig.convolve2d(image[:,:,d], kernel, mode=mode) + channel = sig.fftconvolve(image[:, :, d], kernel, mode=mode) chs.append(channel) return np.stack(chs, axis=2) -- Gitee From 8514e8855a624600d26fc4fa15b941fef0ef5885 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Thu, 12 May 2022 08:52:32 +0000 Subject: [PATCH 2/9] update TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md. --- .../cv/DMSP_ID1290_for_Tensorflow/README.md | 157 ++++++++++-------- 1 file changed, 90 insertions(+), 67 deletions(-) diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md index e72954050..d5f4366cd 100644 --- a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md +++ b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md @@ -151,86 +151,109 @@ $ python ./src/demo_DMSP.py 通过“模型训练”中的训练指令启动单卡训练 ``` -2022-02-23 22:32:03.855277: W tensorflow/core/platform/profile_utils/cpu_utils.cc:98] Failed to find bogomips in /proc/cpuinfo; cannot determine CPU frequency -2022-02-23 22:32:03.864021: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0xaaaade38ad00 initialized for platform Host (this does not guarantee that XLA will be used). Devices: -2022-02-23 22:32:03.864068: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version -============start non-blind deblurring on Berkeley segmentation dataset============== -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:37: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead. +WARNING:tensorflow:From /usr/local/Ascend/tfplugin/latest/tfplugin/python/site-packages/npu_bridge/estimator/npu/npu_optimizer.py:127: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead. -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:37: The name tf.train.AdamOptimizer is deprecated. 
Please use tf.compat.v1.train.AdamOptimizer instead. +WARNING:tensorflow:From demo_DMSP.py:25: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:38: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. +WARNING:tensorflow:From demo_DMSP.py:51: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead. -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:38: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. +2022-03-21 23:33:35.537585: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 AVX512F FMA +2022-03-21 23:33:35.575026: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2600000000 Hz +2022-03-21 23:33:35.580988: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x559177050270 initialized for platform Host (this does not guarantee that XLA will be used). Devices: +2022-03-21 23:33:35.581050: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version +2022-03-21 23:33:35.584501: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/Ascend/ascend-toolkit/latest/fwkacllib/lib64:/usr/lib:/usr/local/python3.7.5/lib: +2022-03-21 23:33:35.584541: E tensorflow/stream_executor/cuda/cuda_driver.cc:318] failed call to cuInit: UNKNOWN ERROR (303) +2022-03-21 23:33:35.584575: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (ubuntu): /proc/driver/nvidia/version does not exist +============start non-blind deblurring on Berkeley segmentation dataset============== +WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:37: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead. -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:42: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead. +WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:38: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:42: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead. +WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:42: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead. -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:7: calling Constant.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version. +WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:7: calling Constant.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version. 
Instructions for updating: Call initializer instance with the dtype argument instead of passing it to the constructor -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:7: calling Constant.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version. -Instructions for updating: -Call initializer instance with the dtype argument instead of passing it to the constructor -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:8: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead. - -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:8: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead. +WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:8: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead. ====================dae================ {'layer0': , 'layer1': , 'layer2': , 'layer3': , 'layer4': , 'layer5': , 'layer6': , 'layer7': , 'layer8': , 'layer9': , 'layer10': , 'layer11': , 'layer12': , 'layer13': , 'layer14': , 'layer15': , 'layer16': , 'layer17': , 'layer18': , 'layer19': , 'layer20': , 'layer21': , 'layer22': , 'layer23': , 'layer24': , 'layer25': , 'layer26': , 'layer27': , 'layer28': , 'layer29': , 'layer30': , 'layer31': , 'layer32': , 'layer33': , 'layer34': , 'layer35': , 'layer36': , 'layer37': , 'layer38': } ====================dae output========= Tensor("strided_slice_1:0", shape=(?, ?, ?, 3), dtype=float32) -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:51: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead. - -WARNING:tensorflow:From /home/ma-user/modelarts/user-job-dir/code/dmsp-tensorflow/DAE_model.py:51: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead. 
- -2022-02-23 22:32:14.897055: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_ARM/tensorflow/tf_adapter/kernels/geop_npu.cc:694] The model has been compiled on the Ascend AI processor, current graph id is:1 -Initialized with PSNR: 17.78958876047073 -2022-02-23 22:32:40.320450: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_ARM/tensorflow/tf_adapter/kernels/geop_npu.cc:694] The model has been compiled on the Ascend AI processor, current graph id is:11 -Finished psnr = 27.65 (1.5 examples/sec; 0.646 sec/batch) -Initialized with PSNR: 21.71935774044799 -Finished psnr = 29.31 (1.4 examples/sec; 0.697 sec/batch) -Initialized with PSNR: 12.418238314349477 -Finished psnr = 21.70 (1.4 examples/sec; 0.704 sec/batch) -Initialized with PSNR: 17.761670521195924 -Finished psnr = 27.69 (1.5 examples/sec; 0.672 sec/batch) -Initialized with PSNR: 23.028104067351563 -Finished psnr = 32.53 (1.4 examples/sec; 0.704 sec/batch) -Initialized with PSNR: 15.075084013742561 -Finished psnr = 27.08 (1.4 examples/sec; 0.703 sec/batch) -Initialized with PSNR: 17.302924438930848 -Finished psnr = 24.16 (1.2 examples/sec; 0.824 sec/batch) -Initialized with PSNR: 17.10059787725738 -Finished psnr = 25.20 (1.3 examples/sec; 0.751 sec/batch) -Initialized with PSNR: 16.07467978560146 -Finished psnr = 25.66 (1.4 examples/sec; 0.712 sec/batch) -Initialized with PSNR: 15.523285818788821 -Finished psnr = 25.79 (1.4 examples/sec; 0.718 sec/batch) -Initialized with PSNR: 20.173765682212093 -Finished psnr = 33.91 (1.5 examples/sec; 0.688 sec/batch) -Initialized with PSNR: 17.809478987327715 -Finished psnr = 29.48 (1.6 examples/sec; 0.640 sec/batch) -Initialized with PSNR: 18.0941733503732 -Finished psnr = 33.18 (1.4 examples/sec; 0.702 sec/batch) -Initialized with PSNR: 17.11170706335929 -Finished psnr = 24.92 (1.4 examples/sec; 0.705 sec/batch) -Initialized with PSNR: 16.409065638468267 -Finished psnr = 29.45 (1.4 examples/sec; 0.727 sec/batch) -Initialized with PSNR: 16.58872443970573 -Finished psnr = 26.77 (1.4 examples/sec; 0.702 sec/batch) -Initialized with PSNR: 16.632015946049982 -Finished psnr = 28.54 (1.2 examples/sec; 0.805 sec/batch) -Initialized with PSNR: 14.895557404412923 -Finished psnr = 25.84 (1.3 examples/sec; 0.741 sec/batch) -Initialized with PSNR: 17.557421710572992 -Finished psnr = 25.67 (1.4 examples/sec; 0.702 sec/batch) -Initialized with PSNR: 23.73822886222646 -Finished psnr = 31.20 (1.1 examples/sec; 0.895 sec/batch) -Initialized with PSNR: 14.288116614544533 -Finished psnr = 21.96 (1.4 examples/sec; 0.735 sec/batch) -Initialized with PSNR: 19.533104118880125 -Finished psnr = 28.99 (1.4 examples/sec; 0.710 sec/batch) +WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:51: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead. 
+ +2022-03-21 23:33:35.866972: W /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_X86/tensorflow/tf_adapter/util/ge_plugin.cc:124] [GePlugin] can not find Environment variable : JOB_ID +2022-03-21 23:33:39.807011: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_X86/tensorflow/tf_adapter/kernels/geop_npu.cc:749] The model has been compiled on the Ascend AI processor, current graph id is:1 +Initialized with PSNR: 18.26756789065104 +2022-03-21 23:33:52.281454: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_X86/tensorflow/tf_adapter/kernels/geop_npu.cc:749] The model has been compiled on the Ascend AI processor, current graph id is:11 +Finished psnr = 25.43 (20.0 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 19.61013455418367 +Finished psnr = 29.58 (20.0 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 16.046844525072277 +Finished psnr = 26.21 (19.3 examples/sec; 0.052 sec/batch) +Initialized with PSNR: 19.088294082853533 +Finished psnr = 24.01 (20.3 examples/sec; 0.049 sec/batch) +Initialized with PSNR: 27.903391840839276 +Finished psnr = 33.05 (19.9 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 17.58393445793693 +Finished psnr = 25.87 (19.3 examples/sec; 0.052 sec/batch) +Initialized with PSNR: 21.496189549703043 +Finished psnr = 27.39 (20.3 examples/sec; 0.049 sec/batch) +Initialized with PSNR: 17.183577420828943 +Finished psnr = 24.84 (19.2 examples/sec; 0.052 sec/batch) +Initialized with PSNR: 18.31449854593027 +Finished psnr = 27.68 (20.2 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 14.78985085202309 +Finished psnr = 22.40 (19.9 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 18.795507564810553 +Finished psnr = 27.73 (19.6 examples/sec; 0.051 sec/batch) +Initialized with PSNR: 16.154563492696358 +Finished psnr = 24.16 (19.9 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 19.207686742438906 +Finished psnr = 27.37 (19.9 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 18.436603775139783 +Finished psnr = 27.64 (20.2 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 23.00822262524223 +Finished psnr = 29.49 (20.7 examples/sec; 0.048 sec/batch) +Initialized with PSNR: 15.08249880666261 +Finished psnr = 23.16 (20.5 examples/sec; 0.049 sec/batch) +Initialized with PSNR: 21.944237300656955 +Finished psnr = 29.84 (19.7 examples/sec; 0.051 sec/batch) +Initialized with PSNR: 18.858999334757787 +Finished psnr = 26.01 (19.7 examples/sec; 0.051 sec/batch) +Initialized with PSNR: 27.03897411812029 +Finished psnr = 30.47 (19.6 examples/sec; 0.051 sec/batch) +Initialized with PSNR: 19.014816037693265 +Finished psnr = 25.69 (19.9 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 12.90457146806597 +Finished psnr = 21.65 (19.1 examples/sec; 0.052 sec/batch) +Initialized with PSNR: 17.119117047716557 +Finished psnr = 28.03 (19.1 examples/sec; 0.052 sec/batch) +Initialized with PSNR: 17.062774129482317 +Finished psnr = 25.44 (20.5 examples/sec; 0.049 sec/batch) +Initialized with PSNR: 19.525629211675938 +Finished psnr = 26.07 (19.6 examples/sec; 0.051 sec/batch) +Initialized with PSNR: 17.46573433130799 +Finished psnr = 24.58 (20.1 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 19.200946422804122 +Finished psnr = 30.53 (19.9 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 17.729834721734107 +Finished psnr = 25.80 (19.8 examples/sec; 0.051 sec/batch) +Initialized with PSNR: 16.386233469398405 +Finished psnr = 22.66 (18.7 examples/sec; 0.053 sec/batch) +Initialized with PSNR: 15.284389775117308 +Finished 
psnr = 26.16 (20.1 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 21.66147837510976 +Finished psnr = 29.72 (20.0 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 14.498257231153538 +Finished psnr = 24.46 (20.0 examples/sec; 0.050 sec/batch) +Initialized with PSNR: 16.504633083401984 +Finished psnr = 24.95 (20.3 examples/sec; 0.049 sec/batch) +Initialized with PSNR: 20.94827880438164 +Finished psnr = 27.77 (19.2 examples/sec; 0.052 sec/batch) +Initialized with PSNR: 15.26399971970905 +Finished psnr = 24.52 (20.5 examples/sec; 0.049 sec/batch) +Initialized with PSNR: 19.150046331140278 ```
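
PATCH 1/9 above swaps `scipy.signal.convolve2d` for `scipy.signal.fftconvolve` inside `filter_image` and `convolve_image`. The refreshed log above (around 0.05 sec/batch) versus the removed log (roughly 0.6–0.9 sec/batch) is consistent with that change: both routines compute the same convolution up to floating-point round-off, but the FFT-based version is much faster for kernels of this size. A minimal sketch checking the equivalence; the random arrays and the kernel size are illustrative stand-ins, not the project's inputs:

```python
import numpy as np
from scipy import signal as sig

rng = np.random.default_rng(0)
channel = rng.random((180, 180))   # one color channel, matching the per-channel loop in filter_image
kernel = rng.random((27, 27))      # arbitrary kernel size for illustration
kernel /= kernel.sum()

# Direct spatial convolution (pre-patch) vs. FFT-based convolution (post-patch).
flipped = np.flipud(np.fliplr(kernel))
direct = sig.convolve2d(channel, flipped, mode='valid')
fast = sig.fftconvolve(channel, flipped, mode='valid')

# Same result up to floating-point round-off.
assert np.allclose(direct, fast)
print(direct.shape, np.abs(direct - fast).max())
```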

精度指标

-- Gitee From 8079f814efca3b0d677f59ade76f641ee3746b1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Thu, 12 May 2022 09:04:07 +0000 Subject: [PATCH 3/9] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Tens?= =?UTF-8?q?orFlow/contrib/cv/DMSP=5FID1290=5Ffor=5FTensorflow/src/DMSPDebl?= =?UTF-8?q?ur.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/DMSPDeblur.py | 169 ------------------ 1 file changed, 169 deletions(-) delete mode 100644 TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py deleted file mode 100644 index 56481f471..000000000 --- a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import time -import matplotlib as mpl -mpl.use('Agg') -import numpy as np -import scipy.signal as sig -import tensorflow as tf - - -def computePSNR(img1, img2, pad_y, pad_x): - """ Computes peak signal-to-noise ratio between two images. - Input: - img1: First image in range of [0, 255]. - img2: Second image in range of [0, 255]. - pad_y: Scalar radius to exclude boundaries from contributing to PSNR computation in vertical direction. - pad_x: Scalar radius to exclude boundaries from contributing to PSNR computation in horizontal direction. 
- - Output: PSNR """ - - img1_u = (np.clip(np.squeeze(img1), 0, 255.0)[pad_y:-pad_y,pad_x:-pad_x,:]).astype(dtype=np.uint8) - img2_u = (np.clip(np.squeeze(img2), 0, 255.0)[pad_y:-pad_y,pad_x:-pad_x,:]).astype(dtype=np.uint8) - imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32) - rmse = np.sqrt(np.mean(np.power(imdiff[:], 2))) - return 20.0 * np.log10(255.0 / rmse) - -def filter_image(image, kernel, mode='valid'): - """ Implements color filtering (convolution using a flipped kernel) """ - chs = [] - for d in range(image.shape[2]): - # channel = sig.convolve2d(image[:,:,d], np.flipud(np.fliplr(kernel)), mode=mode) - channel = sig.fftconvolve(image[:, :, d], np.flipud(np.fliplr(kernel)), mode=mode) - chs.append(channel) - return np.stack(chs, axis=2) - -def convolve_image(image, kernel, mode='valid'): - """ Implements color image convolution """ - chs = [] - for d in range(image.shape[2]): - # channel = sig.convolve2d(image[:,:,d], kernel, mode=mode) - channel = sig.fftconvolve(image[:, :, d], kernel, mode=mode) - chs.append(channel) - return np.stack(chs, axis=2) - - -def DMSPDeblur(degraded, kernel, sigma_d, params): - """ Implements stochastic gradient descent (SGD) Bayes risk minimization for image deblurring described in: - "Deep Mean-Shift Priors for Image Restoration" (http://home.inf.unibe.ch/~bigdeli/DMSPrior.html) - S. A. Bigdeli, M. Jin, P. Favaro, M. Zwicker, Advances in Neural Information Processing Systems (NIPS), 2017 - - Input: - degraded: Observed degraded RGB input image in range of [0, 255]. - kernel: Blur kernel (internally flipped for convolution). - sigma_d: Noise standard deviation. (set to -1 for noise-blind deblurring) - params: Set of parameters. - params.denoiser: The denoiser function hanlde. - - Optional parameters: - params.sigma_dae: The standard deviation of the denoiser training noise. default: 11 - params.num_iter: Specifies number of iterations. - params.mu: The momentum for SGD optimization. default: 0.9 - params.alpha the step length in SGD optimization. 
default: 0.1 - - Outputs: - res: Solution.""" - - if 'denoiser' not in params: - raise ValueError('Need a denoiser in params.denoiser!') - - if 'gt' in params: - print_iter = True - else: - print_iter = False - - if 'sigma_dae' not in params: - params['sigma_dae'] = 11.0 - - if 'num_iter' not in params: - params['num_iter'] = 10 - - if 'mu' not in params: - params['mu'] = 0.9 - - if 'alpha' not in params: - params['alpha'] = 0.1 - - pad_y = np.floor(kernel.shape[0] / 2.0).astype(np.int64) - pad_x = np.floor(kernel.shape[1] / 2.0).astype(np.int64) - res = np.pad(degraded, pad_width=((pad_y, pad_y), (pad_x, pad_x), (0, 0)), mode='edge').astype(np.float32) - - step = np.zeros(res.shape) - - if print_iter: - psnr = computePSNR(params['gt'], res, pad_y, pad_x) - print('Initialized with PSNR: ' + str(psnr)) - - for iter in range(params['num_iter']): - if print_iter: - # print('Running iteration: ' + str(iter)) - t = time.time() - - # compute prior gradient - noise = np.random.normal(0.0, params['sigma_dae'], res.shape).astype(np.float32) - - #离线推理 - input_image = res + noise - input_image.tofile("/cache/model/dmsp_input_image.bin") # 处理后的图片保存为bin文件 - - rec = params['denoiser'].denoise(res + noise,False) - prior_grad = res - rec - - # compute data gradient - map_conv = filter_image(res, kernel) - data_err = map_conv - degraded - data_grad = convolve_image(data_err, kernel, mode='full') - - relative_weight = 0.5 - if sigma_d < 0: - sigma2 = 2 * params['sigma_dae'] * params['sigma_dae'] - - data_sum = np.sum(np.power(np.int64(data_err[:]), 2)) - kernel_sum = np.sum(np.power(np.int64(kernel[:]), 2)) - lambda_ = (degraded.size) / (data_sum - + degraded.size * sigma2 * kernel_sum) - relative_weight = lambda_ / (lambda_ + 1 / params['sigma_dae'] / params['sigma_dae']) - else: - relative_weight = (1 / sigma_d / sigma_d) / ( - 1 / sigma_d / sigma_d + 1 / params['sigma_dae'] / params['sigma_dae']) - # sum the gradients - grad_joint = data_grad * relative_weight + prior_grad * (1 - relative_weight) - - # update - step = params['mu'] * step - params['alpha'] * grad_joint - res = res + step - res = np.minimum(255.0, np.maximum(0, res)).astype(np.float32) - - psnr = computePSNR(params['gt'], res, pad_y, pad_x) - if print_iter: - # print ('PSNR is: ' + str(psnr) + ', iteration finished in ' + str(time.time() - t) + ' seconds') - print('Finished psnr = %.2f (%.1f examples/sec; %.3f sec/batch)' % ( - ( psnr, 1 / (time.time() - t), (time.time() - t)))) - - - return res,psnr -- Gitee From eafb1d44faf6690d456f514752cdb19416b5389d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Thu, 12 May 2022 09:05:27 +0000 Subject: [PATCH 4/9] =?UTF-8?q?dmsp=E6=80=A7=E8=83=BD=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/DMSPDeblur.py | 142 ++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py new file mode 100644 index 000000000..cd62db41b --- /dev/null +++ b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py @@ -0,0 +1,142 @@ +import time +import matplotlib as mpl +mpl.use('Agg') +import numpy as np +import scipy.signal as sig +import tensorflow as tf + + +def computePSNR(img1, img2, pad_y, pad_x): + """ Computes peak signal-to-noise ratio 
between two images. + Input: + img1: First image in range of [0, 255]. + img2: Second image in range of [0, 255]. + pad_y: Scalar radius to exclude boundaries from contributing to PSNR computation in vertical direction. + pad_x: Scalar radius to exclude boundaries from contributing to PSNR computation in horizontal direction. + + Output: PSNR """ + + img1_u = (np.clip(np.squeeze(img1), 0, 255.0)[pad_y:-pad_y,pad_x:-pad_x,:]).astype(dtype=np.uint8) + img2_u = (np.clip(np.squeeze(img2), 0, 255.0)[pad_y:-pad_y,pad_x:-pad_x,:]).astype(dtype=np.uint8) + imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32) + rmse = np.sqrt(np.mean(np.power(imdiff[:], 2))) + return 20.0 * np.log10(255.0 / rmse) + +def filter_image(image, kernel, mode='valid'): + """ Implements color filtering (convolution using a flipped kernel) """ + chs = [] + for d in range(image.shape[2]): + # channel = sig.convolve2d(image[:,:,d], np.flipud(np.fliplr(kernel)), mode=mode) + channel = sig.fftconvolve(image[:, :, d], np.flipud(np.fliplr(kernel)), mode=mode) + chs.append(channel) + return np.stack(chs, axis=2) + +def convolve_image(image, kernel, mode='valid'): + """ Implements color image convolution """ + chs = [] + for d in range(image.shape[2]): + # channel = sig.convolve2d(image[:,:,d], kernel, mode=mode) + channel = sig.fftconvolve(image[:, :, d], kernel, mode=mode) + chs.append(channel) + return np.stack(chs, axis=2) + + +def DMSPDeblur(degraded, kernel, sigma_d, params): + """ Implements stochastic gradient descent (SGD) Bayes risk minimization for image deblurring described in: + "Deep Mean-Shift Priors for Image Restoration" (http://home.inf.unibe.ch/~bigdeli/DMSPrior.html) + S. A. Bigdeli, M. Jin, P. Favaro, M. Zwicker, Advances in Neural Information Processing Systems (NIPS), 2017 + + Input: + degraded: Observed degraded RGB input image in range of [0, 255]. + kernel: Blur kernel (internally flipped for convolution). + sigma_d: Noise standard deviation. (set to -1 for noise-blind deblurring) + params: Set of parameters. + params.denoiser: The denoiser function hanlde. + + Optional parameters: + params.sigma_dae: The standard deviation of the denoiser training noise. default: 11 + params.num_iter: Specifies number of iterations. + params.mu: The momentum for SGD optimization. default: 0.9 + params.alpha the step length in SGD optimization. 
default: 0.1 + + Outputs: + res: Solution.""" + + if 'denoiser' not in params: + raise ValueError('Need a denoiser in params.denoiser!') + + if 'gt' in params: + print_iter = True + else: + print_iter = False + + if 'sigma_dae' not in params: + params['sigma_dae'] = 11.0 + + if 'num_iter' not in params: + params['num_iter'] = 10 + + if 'mu' not in params: + params['mu'] = 0.9 + + if 'alpha' not in params: + params['alpha'] = 0.1 + + pad_y = np.floor(kernel.shape[0] / 2.0).astype(np.int64) + pad_x = np.floor(kernel.shape[1] / 2.0).astype(np.int64) + res = np.pad(degraded, pad_width=((pad_y, pad_y), (pad_x, pad_x), (0, 0)), mode='edge').astype(np.float32) + + step = np.zeros(res.shape) + + if print_iter: + psnr = computePSNR(params['gt'], res, pad_y, pad_x) + print('Initialized with PSNR: ' + str(psnr)) + + for iter in range(params['num_iter']): + if print_iter: + # print('Running iteration: ' + str(iter)) + t = time.time() + + # compute prior gradient + noise = np.random.normal(0.0, params['sigma_dae'], res.shape).astype(np.float32) + + #离线推理 + input_image = res + noise + input_image.tofile("/cache/model/dmsp_input_image.bin") # 处理后的图片保存为bin文件 + + rec = params['denoiser'].denoise(res + noise,False) + prior_grad = res - rec + + # compute data gradient + map_conv = filter_image(res, kernel) + data_err = map_conv - degraded + data_grad = convolve_image(data_err, kernel, mode='full') + + relative_weight = 0.5 + if sigma_d < 0: + sigma2 = 2 * params['sigma_dae'] * params['sigma_dae'] + + data_sum = np.sum(np.power(np.int64(data_err[:]), 2)) + kernel_sum = np.sum(np.power(np.int64(kernel[:]), 2)) + lambda_ = (degraded.size) / (data_sum + + degraded.size * sigma2 * kernel_sum) + relative_weight = lambda_ / (lambda_ + 1 / params['sigma_dae'] / params['sigma_dae']) + else: + relative_weight = (1 / sigma_d / sigma_d) / ( + 1 / sigma_d / sigma_d + 1 / params['sigma_dae'] / params['sigma_dae']) + # sum the gradients + grad_joint = data_grad * relative_weight + prior_grad * (1 - relative_weight) + + # update + step = params['mu'] * step - params['alpha'] * grad_joint + res = res + step + res = np.minimum(255.0, np.maximum(0, res)).astype(np.float32) + + psnr = computePSNR(params['gt'], res, pad_y, pad_x) + if print_iter: + # print ('PSNR is: ' + str(psnr) + ', iteration finished in ' + str(time.time() - t) + ' seconds') + print('Finished psnr = %.2f (%.1f examples/sec; %.3f sec/batch)' % ( + ( psnr, 1 / (time.time() - t), (time.time() - t)))) + + + return res,psnr -- Gitee From 53fb419c72ec3d8fcb1d5f9eb28d526bbd151f8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Thu, 12 May 2022 09:05:50 +0000 Subject: [PATCH 5/9] update TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py. --- .../src/DMSPDeblur.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py index cd62db41b..56481f471 100644 --- a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py +++ b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py @@ -1,3 +1,30 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import time import matplotlib as mpl mpl.use('Agg') -- Gitee From 15e30f57fc9e571908dc8e077dc7910d623ffb49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Wed, 18 May 2022 11:06:37 +0000 Subject: [PATCH 6/9] update TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/README.md. --- .../cv/CSGM_ID2109_for_TensorFlow/README.md | 104 +++++++++++------- 1 file changed, 67 insertions(+), 37 deletions(-) diff --git a/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/README.md b/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/README.md index e8bd39148..12f47ff8f 100644 --- a/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/README.md +++ b/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/README.md @@ -112,6 +112,7 @@ To train: a.请用户自行准备好数据集,包含训练集和验证集两部分,数据集包括Mnist等,包含train和 val两部分。以Mnist数据集为例。 b.上传数据压缩包到训练环境上,无需解压 +``` ``` ├── /datasets/mnist @@ -166,43 +167,72 @@ Extracting cache/dataset/t10k-images-idx3-ubyte.gz Extracting cache/dataset/t10k-labels-idx1-ubyte.gz start training 2021-12-08 13:13:49.596612: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_ARM/tensorflow/tf_adapter/kernels/geop_npu.cc:765] The model has been compiled on the Ascend AI processor, current graph id is:11 -step 0, loss = 621.13 (3.9 examples/sec; 25.653 sec/batch) -step 0, loss = 473.88 (15663.8 examples/sec; 0.006 sec/batch) -step 0, loss = 393.31 (22854.8 examples/sec; 0.004 sec/batch) -step 0, loss = 329.65 (23974.3 examples/sec; 0.004 sec/batch) -step 0, loss = 269.96 (24415.3 examples/sec; 0.004 sec/batch) -step 0, loss = 244.54 (24506.6 examples/sec; 0.004 sec/batch) -step 0, loss = 240.48 (23996.2 examples/sec; 0.004 sec/batch) -step 0, loss = 232.23 (24784.6 examples/sec; 0.004 sec/batch) -step 0, loss = 231.09 (25579.7 examples/sec; 0.004 sec/batch) -step 0, loss = 227.22 (26723.8 examples/sec; 0.004 sec/batch) -step 0, loss = 234.37 (24330.3 examples/sec; 0.004 sec/batch) -step 0, loss = 228.46 (23971.6 examples/sec; 0.004 sec/batch) -step 0, loss = 224.78 (25015.2 examples/sec; 0.004 sec/batch) -step 0, loss = 237.21 (24771.5 examples/sec; 0.004 sec/batch) -step 0, loss = 228.91 (24708.7 examples/sec; 0.004 sec/batch) -step 0, loss = 218.84 (24153.8 examples/sec; 0.004 sec/batch) -step 0, loss = 213.00 (24834.5 examples/sec; 0.004 sec/batch) -step 0, loss = 214.56 (25046.6 examples/sec; 0.004 sec/batch) -step 0, loss = 217.74 (25662.7 examples/sec; 0.004 
sec/batch) -step 0, loss = 217.85 (24633.3 examples/sec; 0.004 sec/batch) -step 0, loss = 210.44 (24662.2 examples/sec; 0.004 sec/batch) -step 0, loss = 208.78 (24595.7 examples/sec; 0.004 sec/batch) -step 0, loss = 208.42 (24978.0 examples/sec; 0.004 sec/batch) -step 0, loss = 210.64 (24394.0 examples/sec; 0.004 sec/batch) -step 0, loss = 210.62 (24500.9 examples/sec; 0.004 sec/batch) -step 0, loss = 214.60 (25016.7 examples/sec; 0.004 sec/batch) -step 0, loss = 207.89 (24577.0 examples/sec; 0.004 sec/batch) -step 0, loss = 202.35 (24890.5 examples/sec; 0.004 sec/batch) -step 0, loss = 205.51 (24786.1 examples/sec; 0.004 sec/batch) -step 0, loss = 211.48 (23782.6 examples/sec; 0.004 sec/batch) -step 0, loss = 216.67 (24902.4 examples/sec; 0.004 sec/batch) -step 0, loss = 212.29 (24771.5 examples/sec; 0.004 sec/batch) -step 0, loss = 204.17 (24849.2 examples/sec; 0.004 sec/batch) -step 0, loss = 210.40 (24969.1 examples/sec; 0.004 sec/batch) -step 0, loss = 210.41 (29371.9 examples/sec; 0.003 sec/batch) -step 0, loss = 203.19 (28197.0 examples/sec; 0.004 sec/batch) -step 0, loss = 210.02 (28771.5 examples/sec; 0.003 sec/batch) +step 12, loss = 103.19 (51654.0 examples/sec; 0.002 sec/batch) +step 12, loss = 103.04 (53065.6 examples/sec; 0.002 sec/batch) +step 12, loss = 98.33 (51807.1 examples/sec; 0.002 sec/batch) +step 12, loss = 100.44 (52187.4 examples/sec; 0.002 sec/batch) +step 12, loss = 98.23 (52911.6 examples/sec; 0.002 sec/batch) +step 12, loss = 103.22 (52083.7 examples/sec; 0.002 sec/batch) +step 12, loss = 102.44 (51974.0 examples/sec; 0.002 sec/batch) +step 12, loss = 103.07 (52389.5 examples/sec; 0.002 sec/batch) +step 12, loss = 99.82 (52435.4 examples/sec; 0.002 sec/batch) +step 12, loss = 100.67 (52402.6 examples/sec; 0.002 sec/batch) +step 12, loss = 103.34 (52851.6 examples/sec; 0.002 sec/batch) +step 12, loss = 104.92 (52632.8 examples/sec; 0.002 sec/batch) +step 12, loss = 99.91 (51539.7 examples/sec; 0.002 sec/batch) +step 12, loss = 101.96 (53410.2 examples/sec; 0.002 sec/batch) +step 12, loss = 103.25 (53166.5 examples/sec; 0.002 sec/batch) +step 12, loss = 107.41 (53335.5 examples/sec; 0.002 sec/batch) +step 12, loss = 106.33 (53546.6 examples/sec; 0.002 sec/batch) +step 12, loss = 104.44 (52291.5 examples/sec; 0.002 sec/batch) +step 12, loss = 97.90 (51609.5 examples/sec; 0.002 sec/batch) +step 12, loss = 101.81 (52298.1 examples/sec; 0.002 sec/batch) +step 12, loss = 104.01 (51590.5 examples/sec; 0.002 sec/batch) +step 12, loss = 98.72 (52032.1 examples/sec; 0.002 sec/batch) +step 12, loss = 98.53 (53200.2 examples/sec; 0.002 sec/batch) +step 12, loss = 98.74 (52317.6 examples/sec; 0.002 sec/batch) +step 12, loss = 105.42 (52232.9 examples/sec; 0.002 sec/batch) +step 12, loss = 103.77 (52665.8 examples/sec; 0.002 sec/batch) +step 12, loss = 102.57 (52396.1 examples/sec; 0.002 sec/batch) +step 12, loss = 99.36 (53234.0 examples/sec; 0.002 sec/batch) +step 12, loss = 101.95 (53615.0 examples/sec; 0.002 sec/batch) +step 12, loss = 105.18 (52422.2 examples/sec; 0.002 sec/batch) +step 12, loss = 102.93 (51704.9 examples/sec; 0.002 sec/batch) +step 12, loss = 100.61 (52369.9 examples/sec; 0.002 sec/batch) +step 12, loss = 106.17 (51225.0 examples/sec; 0.002 sec/batch) +step 12, loss = 102.04 (51813.5 examples/sec; 0.002 sec/batch) +step 12, loss = 107.66 (52369.9 examples/sec; 0.002 sec/batch) +step 12, loss = 109.57 (51673.1 examples/sec; 0.002 sec/batch) +step 12, loss = 104.66 (51388.2 examples/sec; 0.002 sec/batch) +step 12, loss = 101.40 (52514.1 examples/sec; 
0.002 sec/batch) +step 12, loss = 99.98 (51337.9 examples/sec; 0.002 sec/batch) +step 12, loss = 103.62 (51916.1 examples/sec; 0.002 sec/batch) +step 12, loss = 101.46 (53105.9 examples/sec; 0.002 sec/batch) +step 12, loss = 104.52 (52599.7 examples/sec; 0.002 sec/batch) +step 12, loss = 99.36 (52396.1 examples/sec; 0.002 sec/batch) +step 12, loss = 95.70 (52884.9 examples/sec; 0.002 sec/batch) +step 12, loss = 103.42 (47148.2 examples/sec; 0.002 sec/batch) +step 12, loss = 102.05 (50889.4 examples/sec; 0.002 sec/batch) +step 12, loss = 104.18 (52441.9 examples/sec; 0.002 sec/batch) +step 12, loss = 102.44 (52109.6 examples/sec; 0.002 sec/batch) +step 12, loss = 101.26 (51546.1 examples/sec; 0.002 sec/batch) +step 12, loss = 102.05 (52468.2 examples/sec; 0.002 sec/batch) +step 12, loss = 102.58 (52051.4 examples/sec; 0.002 sec/batch) +step 12, loss = 99.98 (52540.4 examples/sec; 0.002 sec/batch) +step 12, loss = 103.54 (53553.4 examples/sec; 0.002 sec/batch) +step 12, loss = 102.53 (53342.3 examples/sec; 0.002 sec/batch) +step 12, loss = 104.03 (52402.6 examples/sec; 0.002 sec/batch) +step 12, loss = 99.35 (52805.0 examples/sec; 0.002 sec/batch) +step 12, loss = 104.60 (51903.3 examples/sec; 0.002 sec/batch) +step 12, loss = 104.71 (51896.9 examples/sec; 0.002 sec/batch) +step 12, loss = 103.18 (52945.0 examples/sec; 0.002 sec/batch) +step 12, loss = 98.93 (52019.1 examples/sec; 0.002 sec/batch) +step 12, loss = 101.61 (51858.4 examples/sec; 0.002 sec/batch) +step 12, loss = 105.64 (52187.4 examples/sec; 0.002 sec/batch) +step 12, loss = 98.17 (52070.8 examples/sec; 0.002 sec/batch) +step 12, loss = 101.07 (51698.6 examples/sec; 0.002 sec/batch) +step 12, loss = 96.97 (52858.3 examples/sec; 0.002 sec/batch) +step 12, loss = 101.28 (52045.0 examples/sec; 0.002 sec/batch) ```
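
PATCH 7/9 further below adds the NPU session options for this CSGM script. A minimal sketch of the setup those added lines produce, assuming TensorFlow 1.15 with the Ascend npu_bridge plugin installed; the log-level variables are set through os.environ here because the patch's os.system('export ...') calls only affect a short-lived subshell, not the running Python process:

```python
import os
import tensorflow as tf
from npu_bridge.npu_init import *  # already imported in main.py; registers the NPU custom optimizer

# Quieter Ascend logging (assumed os.environ equivalent of the patch's export commands).
os.environ["ASCEND_SLOG_PRINT_TO_STDOUT"] = "0"
os.environ["ASCEND_GLOBAL_LOG_LEVEL"] = "3"

config = tf.ConfigProto()
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"
# Let the NPU automatically downcast eligible float32 ops to float16 (mixed precision).
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")

sess = tf.Session(config=config)
```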

精度指标

-- Gitee From 97481a458c34d9ab6d94ebe4d795bc93772a7b38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Wed, 18 May 2022 11:14:10 +0000 Subject: [PATCH 7/9] update TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/mnist_vae/src/main.py. --- .../cv/CSGM_ID2109_for_TensorFlow/mnist_vae/src/main.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/mnist_vae/src/main.py b/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/mnist_vae/src/main.py index 57e1d382f..8fb84d405 100644 --- a/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/mnist_vae/src/main.py +++ b/TensorFlow/contrib/cv/CSGM_ID2109_for_TensorFlow/mnist_vae/src/main.py @@ -43,7 +43,13 @@ import time from npu_bridge.npu_init import * flags = tf.flags FLAGS = flags.FLAGS - +config = tf.ConfigProto() +custom_op = config.graph_options.rewrite_options.custom_optimizers.add() +custom_op.name = "NpuOptimizer" +custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") +#关闭日志级别 +os.system('export ASCEND_SLOG_PRINT_TO_STDOUT=0') +os.system('export ASCEND_GLOBAL_LOG_LEVEL=3') #import argparse #import moxing as mox # 解析输入参数data_url -- Gitee From f761a8b55e93b7c31d5d71eba8f90b93e6543058 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Sun, 22 May 2022 02:03:04 +0000 Subject: [PATCH 8/9] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Tens?= =?UTF-8?q?orFlow/contrib/cv/DMSP=5FID1290=5Ffor=5FTensorflow/README.md?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/DMSP_ID1290_for_Tensorflow/README.md | 264 ------------------ 1 file changed, 264 deletions(-) delete mode 100644 TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md deleted file mode 100644 index d5f4366cd..000000000 --- a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/README.md +++ /dev/null @@ -1,264 +0,0 @@ -DMSP - -- [基本信息](#基本信息.md) -- [概述](#概述.md) -- [默认配置](#默认配置.md) -- [训练过程](#训练过程.md) -- [精度指标](#精度指标.md) -

基本信息

- -**发布者(Publisher):Huawei** - -**应用领域(Application Domain):Image Restorationg** - -**版本(Version):1.1** - -**修改时间(Modified) :2022.0224.20** - -**大小(Size):25M** - -**框架(Framework):TensorFlow 1.15.0** - -**模型格式(Model Format):ckpt** - -**精度(Precision):Mixed** - -**处理器(Processor):昇腾910** - -**应用级别(Categories):Official** - -**描述(Description):基于TensorFlow框架的用于图像恢复的深度均值偏移先验代码** - -

概述

- -在本文中,作者介绍了一个自然图像先验,它直接表示自然图像分布的高斯平滑版本。 作者将先验包含在图像恢复的公式中,作为贝叶斯估计器,这允许解决噪声盲图像恢复问题。 实验表明先验梯度对应于自然图像分布上的均值偏移向量。 此外,作者使用去噪自编码器学习均值偏移向量场,并将其用于梯度下降方法以执行贝叶斯风险最小化。 论文展示了噪声盲去模糊、超分辨率和去马赛克的竞争结果 -- 参考论文: - - [Deep Mean-Shift Priors for Image Restoration](https://arxiv.org/abs/1709.03749) - -- 参考实现: - - - -- Tensorflow的实现: - - [https://github.com/siavashBigdeli/DMSP-tensorflow](https://github.com/siavashBigdeli/DMSP-tensorflow) - - -- 通过Git获取对应commit\_id的代码方法如下: - - ``` - git clone {repository_url} # 克隆仓库的代码 - cd {repository_name} # 切换到模型的代码仓目录 - git checkout {branch} # 切换到对应分支 - git reset --hard {commit_id} # 代码设置到对应的commit_id - cd {code_path} # 切换到模型代码所在路径,若仓库下只有该模型,则无需切换 -## 默认配置 - -- 训练数据集预处理(以ImageNet训练集为例,仅作为用户参考示例): - - - 图像的输入尺寸统一为(180,180,3) - -- 测试数据集预处理(以Berkeley验证集为例,仅作为用户参考示例) - - - 图像的输入尺寸为统一为(180,180,3) - -- 训练超参 - - - Batch size: 1 - - Gaussian noise levels = 11 - - Learning rate(LR): 0.01 - - momentum : 0.9 - - Optimizer: SGD with Momentum - - Train epoch: 300 iterations - - **npu实现:** - -##### 支持特性 - -支持混合精度训练,脚本中默认开启了混合精度,参考示例,见“开启混合精度”。 - -##### 混合精度训练 - -昇腾910 AI处理器提供自动混合精度功能,可以针对全网中float32数据类型的算子,按照内置的优化策略,自动将部分float32的算子降低精度到float16,从而在精度损失很小的情况下提升系统性能并减少内存使用。 - --开启混合精度 - -``` -custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") -``` - - -### 准备工作 - -##### 训练环境的准备 - -硬件环境:Ascend: 1*Ascend 910(32GB) | ARM: 24 核 96GB - -运行环境:ascend-share/5.0.3.alpha005_tensorflow-ascend910-cp37-euleros2.8-aarch64-training:1.15.0-21.0.2_1101 - -``` -## 训练 - -To train: - -#### 环境依赖 - -制作数据集的环境上已安装Python3.7和TensorFlow 1.15.0。 - -#### 操作步骤 - -1. 数据集准备。 - - a.请用户自行准备好数据集,包含训练集和验证集两部分,数据集包括Mnist等,包含train和 val两部分。以Mnist数据集为例。 - - b.上传数据压缩包到训练环境上,无需解压 - - - ├── /datasets/imagenet - │ ├──imagenet - │ ├──Berkeley - ``` - - ``` -## 脚本和示例代码 - -``` -``` -├── src -│ ├──config.py //训练定义 -│ ├──DAE.py //模型定义 -│ ├──DAE_model.py //重载模型 -│ ├──demo_DMSP.py //主程序 -│ ├──DMSPDeblur.py //先验去噪 -│ ├──network.py //其他功能函数 -│ ├──ops.py //算子定义 -``` -2. 模型训练。 - - 运行脚本如下: - -```shell -$ python ./src/demo_DMSP.py -``` - -3. 使用pycharm在ModelArts训练启动文件为: - - ``` - /src/demo_DMSP.py - ``` -## 训练过程 - -通过“模型训练”中的训练指令启动单卡训练 -``` -WARNING:tensorflow:From /usr/local/Ascend/tfplugin/latest/tfplugin/python/site-packages/npu_bridge/estimator/npu/npu_optimizer.py:127: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead. - -WARNING:tensorflow:From demo_DMSP.py:25: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. - -WARNING:tensorflow:From demo_DMSP.py:51: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead. - -2022-03-21 23:33:35.537585: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 AVX512F FMA -2022-03-21 23:33:35.575026: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2600000000 Hz -2022-03-21 23:33:35.580988: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x559177050270 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices: -2022-03-21 23:33:35.581050: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version -2022-03-21 23:33:35.584501: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/Ascend/ascend-toolkit/latest/fwkacllib/lib64:/usr/lib:/usr/local/python3.7.5/lib: -2022-03-21 23:33:35.584541: E tensorflow/stream_executor/cuda/cuda_driver.cc:318] failed call to cuInit: UNKNOWN ERROR (303) -2022-03-21 23:33:35.584575: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (ubuntu): /proc/driver/nvidia/version does not exist -============start non-blind deblurring on Berkeley segmentation dataset============== -WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:37: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead. - -WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:38: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead. - -WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:42: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead. - -WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:7: calling Constant.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version. -Instructions for updating: -Call initializer instance with the dtype argument instead of passing it to the constructor -WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:8: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead. - -====================dae================ -{'layer0': , 'layer1': , 'layer2': , 'layer3': , 'layer4': , 'layer5': , 'layer6': , 'layer7': , 'layer8': , 'layer9': , 'layer10': , 'layer11': , 'layer12': , 'layer13': , 'layer14': , 'layer15': , 'layer16': , 'layer17': , 'layer18': , 'layer19': , 'layer20': , 'layer21': , 'layer22': , 'layer23': , 'layer24': , 'layer25': , 'layer26': , 'layer27': , 'layer28': , 'layer29': , 'layer30': , 'layer31': , 'layer32': , 'layer33': , 'layer34': , 'layer35': , 'layer36': , 'layer37': , 'layer38': } -====================dae output========= -Tensor("strided_slice_1:0", shape=(?, ?, ?, 3), dtype=float32) -WARNING:tensorflow:From /home/test_user05/dmsp-tensorflow/DAE_model.py:51: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead. 
- -2022-03-21 23:33:35.866972: W /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_X86/tensorflow/tf_adapter/util/ge_plugin.cc:124] [GePlugin] can not find Environment variable : JOB_ID -2022-03-21 23:33:39.807011: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_X86/tensorflow/tf_adapter/kernels/geop_npu.cc:749] The model has been compiled on the Ascend AI processor, current graph id is:1 -Initialized with PSNR: 18.26756789065104 -2022-03-21 23:33:52.281454: I /home/jenkins/agent/workspace/Compile_GraphEngine_Centos_X86/tensorflow/tf_adapter/kernels/geop_npu.cc:749] The model has been compiled on the Ascend AI processor, current graph id is:11 -Finished psnr = 25.43 (20.0 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 19.61013455418367 -Finished psnr = 29.58 (20.0 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 16.046844525072277 -Finished psnr = 26.21 (19.3 examples/sec; 0.052 sec/batch) -Initialized with PSNR: 19.088294082853533 -Finished psnr = 24.01 (20.3 examples/sec; 0.049 sec/batch) -Initialized with PSNR: 27.903391840839276 -Finished psnr = 33.05 (19.9 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 17.58393445793693 -Finished psnr = 25.87 (19.3 examples/sec; 0.052 sec/batch) -Initialized with PSNR: 21.496189549703043 -Finished psnr = 27.39 (20.3 examples/sec; 0.049 sec/batch) -Initialized with PSNR: 17.183577420828943 -Finished psnr = 24.84 (19.2 examples/sec; 0.052 sec/batch) -Initialized with PSNR: 18.31449854593027 -Finished psnr = 27.68 (20.2 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 14.78985085202309 -Finished psnr = 22.40 (19.9 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 18.795507564810553 -Finished psnr = 27.73 (19.6 examples/sec; 0.051 sec/batch) -Initialized with PSNR: 16.154563492696358 -Finished psnr = 24.16 (19.9 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 19.207686742438906 -Finished psnr = 27.37 (19.9 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 18.436603775139783 -Finished psnr = 27.64 (20.2 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 23.00822262524223 -Finished psnr = 29.49 (20.7 examples/sec; 0.048 sec/batch) -Initialized with PSNR: 15.08249880666261 -Finished psnr = 23.16 (20.5 examples/sec; 0.049 sec/batch) -Initialized with PSNR: 21.944237300656955 -Finished psnr = 29.84 (19.7 examples/sec; 0.051 sec/batch) -Initialized with PSNR: 18.858999334757787 -Finished psnr = 26.01 (19.7 examples/sec; 0.051 sec/batch) -Initialized with PSNR: 27.03897411812029 -Finished psnr = 30.47 (19.6 examples/sec; 0.051 sec/batch) -Initialized with PSNR: 19.014816037693265 -Finished psnr = 25.69 (19.9 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 12.90457146806597 -Finished psnr = 21.65 (19.1 examples/sec; 0.052 sec/batch) -Initialized with PSNR: 17.119117047716557 -Finished psnr = 28.03 (19.1 examples/sec; 0.052 sec/batch) -Initialized with PSNR: 17.062774129482317 -Finished psnr = 25.44 (20.5 examples/sec; 0.049 sec/batch) -Initialized with PSNR: 19.525629211675938 -Finished psnr = 26.07 (19.6 examples/sec; 0.051 sec/batch) -Initialized with PSNR: 17.46573433130799 -Finished psnr = 24.58 (20.1 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 19.200946422804122 -Finished psnr = 30.53 (19.9 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 17.729834721734107 -Finished psnr = 25.80 (19.8 examples/sec; 0.051 sec/batch) -Initialized with PSNR: 16.386233469398405 -Finished psnr = 22.66 (18.7 examples/sec; 0.053 sec/batch) -Initialized with PSNR: 15.284389775117308 -Finished 
psnr = 26.16 (20.1 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 21.66147837510976 -Finished psnr = 29.72 (20.0 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 14.498257231153538 -Finished psnr = 24.46 (20.0 examples/sec; 0.050 sec/batch) -Initialized with PSNR: 16.504633083401984 -Finished psnr = 24.95 (20.3 examples/sec; 0.049 sec/batch) -Initialized with PSNR: 20.94827880438164 -Finished psnr = 27.77 (19.2 examples/sec; 0.052 sec/batch) -Initialized with PSNR: 15.26399971970905 -Finished psnr = 24.52 (20.5 examples/sec; 0.049 sec/batch) -Initialized with PSNR: 19.150046331140278 -``` -

精度指标

- -精度:psnr: - -|gpu|npu|原论文| -|:----:|:----:|:----:| -|26.06|26.06|26.00| \ No newline at end of file -- Gitee From 6cc4b0d77b4b45799ba6cc2e32cf83fb2d961574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=BA=BA=E6=B0=B4=E7=BE=8E=E4=BA=BA=E9=B1=BC?= <522341892@qq.com> Date: Sun, 22 May 2022 02:03:24 +0000 Subject: [PATCH 9/9] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Tens?= =?UTF-8?q?orFlow/contrib/cv/DMSP=5FID1290=5Ffor=5FTensorflow/src/DMSPDebl?= =?UTF-8?q?ur.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/DMSPDeblur.py | 169 ------------------ 1 file changed, 169 deletions(-) delete mode 100644 TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py diff --git a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py b/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py deleted file mode 100644 index 56481f471..000000000 --- a/TensorFlow/contrib/cv/DMSP_ID1290_for_Tensorflow/src/DMSPDeblur.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import time -import matplotlib as mpl -mpl.use('Agg') -import numpy as np -import scipy.signal as sig -import tensorflow as tf - - -def computePSNR(img1, img2, pad_y, pad_x): - """ Computes peak signal-to-noise ratio between two images. - Input: - img1: First image in range of [0, 255]. - img2: Second image in range of [0, 255]. - pad_y: Scalar radius to exclude boundaries from contributing to PSNR computation in vertical direction. - pad_x: Scalar radius to exclude boundaries from contributing to PSNR computation in horizontal direction. 
- - Output: PSNR """ - - img1_u = (np.clip(np.squeeze(img1), 0, 255.0)[pad_y:-pad_y,pad_x:-pad_x,:]).astype(dtype=np.uint8) - img2_u = (np.clip(np.squeeze(img2), 0, 255.0)[pad_y:-pad_y,pad_x:-pad_x,:]).astype(dtype=np.uint8) - imdiff = (img1_u).astype(dtype=np.float32) - (img2_u).astype(dtype=np.float32) - rmse = np.sqrt(np.mean(np.power(imdiff[:], 2))) - return 20.0 * np.log10(255.0 / rmse) - -def filter_image(image, kernel, mode='valid'): - """ Implements color filtering (convolution using a flipped kernel) """ - chs = [] - for d in range(image.shape[2]): - # channel = sig.convolve2d(image[:,:,d], np.flipud(np.fliplr(kernel)), mode=mode) - channel = sig.fftconvolve(image[:, :, d], np.flipud(np.fliplr(kernel)), mode=mode) - chs.append(channel) - return np.stack(chs, axis=2) - -def convolve_image(image, kernel, mode='valid'): - """ Implements color image convolution """ - chs = [] - for d in range(image.shape[2]): - # channel = sig.convolve2d(image[:,:,d], kernel, mode=mode) - channel = sig.fftconvolve(image[:, :, d], kernel, mode=mode) - chs.append(channel) - return np.stack(chs, axis=2) - - -def DMSPDeblur(degraded, kernel, sigma_d, params): - """ Implements stochastic gradient descent (SGD) Bayes risk minimization for image deblurring described in: - "Deep Mean-Shift Priors for Image Restoration" (http://home.inf.unibe.ch/~bigdeli/DMSPrior.html) - S. A. Bigdeli, M. Jin, P. Favaro, M. Zwicker, Advances in Neural Information Processing Systems (NIPS), 2017 - - Input: - degraded: Observed degraded RGB input image in range of [0, 255]. - kernel: Blur kernel (internally flipped for convolution). - sigma_d: Noise standard deviation. (set to -1 for noise-blind deblurring) - params: Set of parameters. - params.denoiser: The denoiser function hanlde. - - Optional parameters: - params.sigma_dae: The standard deviation of the denoiser training noise. default: 11 - params.num_iter: Specifies number of iterations. - params.mu: The momentum for SGD optimization. default: 0.9 - params.alpha the step length in SGD optimization. 
default: 0.1 - - Outputs: - res: Solution.""" - - if 'denoiser' not in params: - raise ValueError('Need a denoiser in params.denoiser!') - - if 'gt' in params: - print_iter = True - else: - print_iter = False - - if 'sigma_dae' not in params: - params['sigma_dae'] = 11.0 - - if 'num_iter' not in params: - params['num_iter'] = 10 - - if 'mu' not in params: - params['mu'] = 0.9 - - if 'alpha' not in params: - params['alpha'] = 0.1 - - pad_y = np.floor(kernel.shape[0] / 2.0).astype(np.int64) - pad_x = np.floor(kernel.shape[1] / 2.0).astype(np.int64) - res = np.pad(degraded, pad_width=((pad_y, pad_y), (pad_x, pad_x), (0, 0)), mode='edge').astype(np.float32) - - step = np.zeros(res.shape) - - if print_iter: - psnr = computePSNR(params['gt'], res, pad_y, pad_x) - print('Initialized with PSNR: ' + str(psnr)) - - for iter in range(params['num_iter']): - if print_iter: - # print('Running iteration: ' + str(iter)) - t = time.time() - - # compute prior gradient - noise = np.random.normal(0.0, params['sigma_dae'], res.shape).astype(np.float32) - - #离线推理 - input_image = res + noise - input_image.tofile("/cache/model/dmsp_input_image.bin") # 处理后的图片保存为bin文件 - - rec = params['denoiser'].denoise(res + noise,False) - prior_grad = res - rec - - # compute data gradient - map_conv = filter_image(res, kernel) - data_err = map_conv - degraded - data_grad = convolve_image(data_err, kernel, mode='full') - - relative_weight = 0.5 - if sigma_d < 0: - sigma2 = 2 * params['sigma_dae'] * params['sigma_dae'] - - data_sum = np.sum(np.power(np.int64(data_err[:]), 2)) - kernel_sum = np.sum(np.power(np.int64(kernel[:]), 2)) - lambda_ = (degraded.size) / (data_sum - + degraded.size * sigma2 * kernel_sum) - relative_weight = lambda_ / (lambda_ + 1 / params['sigma_dae'] / params['sigma_dae']) - else: - relative_weight = (1 / sigma_d / sigma_d) / ( - 1 / sigma_d / sigma_d + 1 / params['sigma_dae'] / params['sigma_dae']) - # sum the gradients - grad_joint = data_grad * relative_weight + prior_grad * (1 - relative_weight) - - # update - step = params['mu'] * step - params['alpha'] * grad_joint - res = res + step - res = np.minimum(255.0, np.maximum(0, res)).astype(np.float32) - - psnr = computePSNR(params['gt'], res, pad_y, pad_x) - if print_iter: - # print ('PSNR is: ' + str(psnr) + ', iteration finished in ' + str(time.time() - t) + ' seconds') - print('Finished psnr = %.2f (%.1f examples/sec; %.3f sec/batch)' % ( - ( psnr, 1 / (time.time() - t), (time.time() - t)))) - - - return res,psnr -- Gitee
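
For reference, a minimal usage sketch of the `DMSPDeblur` entry point whose final version is added in PATCH 4/9 and 5/9 above. The ground-truth image, kernel, and denoiser below are hypothetical stand-ins (any object exposing `denoise(image, training_flag)` works; the repo's trained DAE from DAE_model.py is the real denoiser), and note that the function writes its noisy input to /cache/model/dmsp_input_image.bin on every iteration, so that directory must exist:

```python
import numpy as np
from DMSPDeblur import DMSPDeblur, filter_image  # src/DMSPDeblur.py from this repo


class IdentityDenoiser:
    """Toy stand-in for the trained DAE; only the .denoise(image, flag) interface matters."""
    def denoise(self, image, training):
        return image


rng = np.random.default_rng(0)
gt = rng.uniform(0.0, 255.0, (180, 180, 3)).astype(np.float32)  # placeholder ground truth
kernel = np.ones((5, 5), dtype=np.float32) / 25.0                # placeholder blur kernel

# Degrade: blur (valid region) plus Gaussian noise.
sigma_d = 255.0 * 0.01
blurred = filter_image(gt, kernel)
degraded = blurred + rng.normal(0.0, sigma_d, blurred.shape)

params = {
    'denoiser': IdentityDenoiser(),  # replace with the loaded DAE in real use
    'sigma_dae': 11.0,
    'num_iter': 300,                 # the README trains for 300 iterations
    'mu': 0.9,
    'alpha': 0.1,
    'gt': gt,                        # enables the "Initialized/Finished psnr" printouts seen in the log
}

restored, psnr = DMSPDeblur(degraded, kernel, sigma_d, params)
print('final PSNR:', psnr)
```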