diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/.gitignore b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..894a44cc066a027465cd26d634948d56d13af9af --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/.gitignore @@ -0,0 +1,104 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/.keep b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/.keep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Network.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Network.py new file mode 100644 index 0000000000000000000000000000000000000000..34ce3a9f952b6d877fa80ee3442184450d9d3176 --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Network.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python +#title :Network.py +#description :Architecture file(Generator and Discriminator) +#author :Deepak Birla +#date :2018/10/30 +#usage :from Network import Generator, Discriminator +#python_version :3.5.4 + +# Modules +from keras.layers import Dense +from keras.layers.core import Activation +from keras.layers.normalization import BatchNormalization +from keras.layers.convolutional import UpSampling2D +from keras.layers.core import Flatten +from keras.layers import Input +from keras.layers.convolutional import Conv2D, Conv2DTranspose +from keras.models import Model +from keras.layers.advanced_activations import LeakyReLU, PReLU +from keras.layers import add + +# Residual block +def res_block_gen(model, kernal_size, filters, strides): + + gen = model + + model = Conv2D(filters = filters, kernel_size = kernal_size, strides = strides, padding = "same")(model) + model = BatchNormalization(momentum = 0.5)(model) + # Using Parametric ReLU + model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(model) + model = Conv2D(filters = filters, kernel_size = kernal_size, strides = strides, padding = "same")(model) + model = BatchNormalization(momentum = 0.5)(model) + + model = add([gen, model]) + + return model + + +def 
up_sampling_block(model, kernal_size, filters, strides): + + # In place of Conv2D and UpSampling2D we can also use Conv2DTranspose (Both are used for Deconvolution) + # Even we can have our own function for deconvolution (i.e one made in Utils.py) + #model = Conv2DTranspose(filters = filters, kernel_size = kernal_size, strides = strides, padding = "same")(model) + model = Conv2D(filters = filters, kernel_size = kernal_size, strides = strides, padding = "same")(model) + model = UpSampling2D(size = 2)(model) + model = LeakyReLU(alpha = 0.2)(model) + + return model + + +def discriminator_block(model, filters, kernel_size, strides): + + model = Conv2D(filters = filters, kernel_size = kernel_size, strides = strides, padding = "same")(model) + model = BatchNormalization(momentum = 0.5)(model) + model = LeakyReLU(alpha = 0.2)(model) + + return model + +# Network Architecture is same as given in Paper https://arxiv.org/pdf/1609.04802.pdf +class Generator(object): + + def __init__(self, noise_shape): + + self.noise_shape = noise_shape + + def generator(self): + + gen_input = Input(shape = self.noise_shape) + + model = Conv2D(filters = 64, kernel_size = 9, strides = 1, padding = "same")(gen_input) + model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(model) + + gen_model = model + + # Using 16 Residual Blocks + for index in range(16): + model = res_block_gen(model, 3, 64, 1) + + model = Conv2D(filters = 64, kernel_size = 3, strides = 1, padding = "same")(model) + model = BatchNormalization(momentum = 0.5)(model) + model = add([gen_model, model]) + + # Using 2 UpSampling Blocks + for index in range(2): + model = up_sampling_block(model, 3, 256, 1) + + model = Conv2D(filters = 3, kernel_size = 9, strides = 1, padding = "same")(model) + model = Activation('tanh')(model) + + generator_model = Model(inputs = gen_input, outputs = model) + + return generator_model + +# Network Architecture is same as given in Paper https://arxiv.org/pdf/1609.04802.pdf +class Discriminator(object): + + def __init__(self, image_shape): + + self.image_shape = image_shape + + def discriminator(self): + + dis_input = Input(shape = self.image_shape) + + model = Conv2D(filters = 64, kernel_size = 3, strides = 1, padding = "same")(dis_input) + model = LeakyReLU(alpha = 0.2)(model) + + model = discriminator_block(model, 64, 3, 2) + model = discriminator_block(model, 128, 3, 1) + model = discriminator_block(model, 128, 3, 2) + model = discriminator_block(model, 256, 3, 1) + model = discriminator_block(model, 256, 3, 2) + model = discriminator_block(model, 512, 3, 1) + model = discriminator_block(model, 512, 3, 2) + + model = Flatten()(model) + model = Dense(1024)(model) + model = LeakyReLU(alpha = 0.2)(model) + + model = Dense(1)(model) + model = Activation('sigmoid')(model) + + discriminator_model = Model(inputs = dis_input, outputs = model) + + return discriminator_model diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Utils.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Utils.py new file mode 100644 index 0000000000000000000000000000000000000000..573ac7818b23797b4c620f9976598e825f341c92 --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Utils.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python +#title :Utils.py +#description :Have helper functions to process images and plot images +#author :Deepak Birla +#date :2018/10/30 +#usage :imported in other files +#python_version :3.5.4 + +from keras.layers import Lambda +import tensorflow as tf + 
+from skimage import io
+import numpy as np
+from numpy import array
+from numpy.random import randint
+import cv2 as cv
+from PIL import Image
+import os
+import sys
+import matplotlib.pyplot as plt
+plt.switch_backend('agg')
+
+# Subpixel convolution upsamples from (h, w, c) to (h*r, w*r, c/r^2)
+def SubpixelConv2D(input_shape, scale=4):
+    def subpixel_shape(input_shape):
+        dims = [input_shape[0], input_shape[1] * scale, input_shape[2] * scale, int(input_shape[3] / (scale ** 2))]
+        output_shape = tuple(dims)
+        return output_shape
+
+    def subpixel(x):
+        return tf.depth_to_space(x, scale)
+
+    return Lambda(subpixel, output_shape=subpixel_shape)
+
+# Takes a list of images and returns the HR images as a numpy object array
+def hr_images(images):
+    images_hr = array(images, dtype=object)
+    return images_hr
+
+# Takes a list of images and returns the LR images, downscaled by `downscale`, as a numpy object array
+def lr_images(images_real, downscale):
+
+    images = []
+    for img in range(len(images_real)):
+        images.append(cv.resize(images_real[img], (0, 0), fx=1.0 / downscale, fy=1.0 / downscale, interpolation=cv.INTER_CUBIC))
+    images_lr = array(images, dtype=object)
+    return images_lr
+
+# Scale uint8 pixel values [0, 255] to float32 [-1, 1]
+def normalize(input_data):
+
+    return (input_data.astype(np.float32) - 127.5) / 127.5
+
+# Scale float values [-1, 1] back to uint8 [0, 255]
+def denormalize(input_data):
+    input_data = (input_data + 1) * 127.5
+    return input_data.astype(np.uint8)
+
+
+# Recursively collect `path` and all of its subdirectories
+def load_path(path):
+    directories = []
+    if os.path.isdir(path):
+        directories.append(path)
+    for elem in os.listdir(path):
+        if os.path.isdir(os.path.join(path, elem)):
+            # load_path() already appends the subdirectory itself,
+            # so appending it again here would visit it twice
+            directories = directories + load_path(os.path.join(path, elem))
+    return directories
+
+def load_data_from_dirs(dirs, ext):
+    files = []
+    file_names = []
+    count = 0
+    for d in dirs:
+        for f in os.listdir(d):
+            if f.endswith(ext):
+                image = io.imread(os.path.join(d, f))
+                if len(image.shape) > 2:
+                    files.append(image)
+                    file_names.append(os.path.join(d, f))
+                    count = count + 1
+    return files
+
+def load_data(directory, ext):
+
+    files = load_data_from_dirs(load_path(directory), ext)
+    return files
+
+def load_training_data(directory, ext, number_of_images = 10000, train_test_ratio = 0.8):
+
+    number_of_train_images = int(number_of_images * train_test_ratio)
+    files = load_data_from_dirs(load_path(directory), ext)
+
+    if len(files) < number_of_images:
+        print("Number of image files is less than you specified")
+        print("Please reduce number of images to %d" % len(files))
+        sys.exit()
+
+    test_array = array(files, dtype=object)
+    if len(test_array[0].shape) < 3:
+        print("Images are not 3-channel")
+        print("Please provide RGB images of the same shape")
+        sys.exit()
+
+    x_train = files[:number_of_train_images]
+    x_test = files[number_of_train_images:number_of_images]
+
+    # Normalize image by image, since the arrays hold per-image objects
+    x_train_hr = hr_images(x_train)
+    for i in range(number_of_train_images):
+        x_train_hr[i] = normalize(x_train_hr[i])
+
+    x_train_lr = lr_images(x_train, 4)
+    for j in range(number_of_train_images):
+        x_train_lr[j] = normalize(x_train_lr[j])
+
+    x_test_hr = hr_images(x_test)
+    for k in range(number_of_images - number_of_train_images):
+        x_test_hr[k] = normalize(x_test_hr[k])
+
+    x_test_lr = lr_images(x_test, 4)
+    for w in range(number_of_images - number_of_train_images):
+        x_test_lr[w] = normalize(x_test_lr[w])
+
+    return x_train_lr, x_train_hr, x_test_lr, x_test_hr
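+
+# Illustrative note (added; not part of the original code): with the 192x192x3 HR
+# crops used by train.py and a downscale factor of 4, lr_images() yields 48x48x3
+# inputs, and normalize() maps uint8 [0, 255] into float32 [-1, 1], e.g.:
+#   >>> img = np.zeros((192, 192, 3), dtype=np.uint8)
+#   >>> normalize(img).min(), normalize(img).max()
+#   (-1.0, -1.0)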
+
+
+def load_test_data_for_model(directory, ext, number_of_images = 100):
+
+    files = load_data_from_dirs(load_path(directory), ext)
+
+    if len(files) < number_of_images:
+        print("Number of image files is less than you specified")
+        print("Please reduce number of images to %d" % len(files))
+        sys.exit()
+
+    # Use only the requested number of images
+    files = files[:number_of_images]
+
+    x_test_hr = hr_images(files)
+    for i in range(number_of_images):
+        x_test_hr[i] = normalize(x_test_hr[i])
+
+    x_test_lr = lr_images(files, 4)
+    for i in range(number_of_images):
+        x_test_lr[i] = normalize(x_test_lr[i])
+
+    return x_test_lr, x_test_hr
+
+def load_test_data(directory, ext, number_of_images = 100):
+
+    files = load_data_from_dirs(load_path(directory), ext)
+
+    if len(files) < number_of_images:
+        print("Number of image files is less than you specified")
+        print("Please reduce number of images to %d" % len(files))
+        sys.exit()
+
+    # Use only the requested number of images
+    files = files[:number_of_images]
+
+    x_test_lr = lr_images(files, 4)
+    for i in range(number_of_images):
+        x_test_lr[i] = normalize(x_test_lr[i])
+
+    return x_test_lr
+
+# While training, save one randomly chosen sample of generated images (as LR, SR, HR)
+def plot_generated_images(output_dir, epoch, generator, x_test_hr, x_test_lr, dim=(1, 3), figsize=(15, 5)):
+
+    examples = x_test_hr.shape[0]
+    value = randint(0, examples)
+    image_batch_hr = denormalize(x_test_hr)
+    image_batch_lr = x_test_lr
+    gen_img = generator.predict(image_batch_lr, batch_size=16)
+    generated_image = denormalize(gen_img)
+    image_batch_lr = denormalize(image_batch_lr)
+
+    plt.figure(figsize=figsize)
+
+    plt.subplot(dim[0], dim[1], 1)
+    plt.imshow(image_batch_lr[value], interpolation='nearest')
+    plt.axis('off')
+
+    plt.subplot(dim[0], dim[1], 2)
+    plt.imshow(generated_image[value], interpolation='nearest')
+    plt.axis('off')
+
+    plt.subplot(dim[0], dim[1], 3)
+    plt.imshow(image_batch_hr[value], interpolation='nearest')
+    plt.axis('off')
+
+    plt.tight_layout()
+    plt.savefig(output_dir + 'generated_image_%d.png' % epoch)
+
+# Plot and save generated images (as LR, SR, HR) for every test image given to the model
+def plot_test_generated_images_for_model(output_dir, generator, x_test_hr, x_test_lr, dim=(1, 3), figsize=(15, 5)):
+
+    examples = x_test_hr.shape[0]
+    image_batch_hr = denormalize(x_test_hr)
+    image_batch_lr = x_test_lr
+    gen_img = generator.predict(image_batch_lr)
+    generated_image = denormalize(gen_img)
+    image_batch_lr = denormalize(image_batch_lr)
+
+    for index in range(examples):
+
+        plt.figure(figsize=figsize)
+
+        plt.subplot(dim[0], dim[1], 1)
+        plt.imshow(image_batch_lr[index], interpolation='nearest')
+        plt.axis('off')
+
+        plt.subplot(dim[0], dim[1], 2)
+        plt.imshow(generated_image[index], interpolation='nearest')
+        plt.axis('off')
+
+        plt.subplot(dim[0], dim[1], 3)
+        plt.imshow(image_batch_hr[index], interpolation='nearest')
+        plt.axis('off')
+
+        plt.tight_layout()
+        plt.savefig(output_dir + 'test_generated_image_%d.png' % index)
+
+# Takes LR images and saves the corresponding SR images
+def plot_test_generated_images(output_dir, generator, x_test_lr, figsize=(5, 5)):
+
+    examples = x_test_lr.shape[0]
+    image_batch_lr = x_test_lr
+    gen_img = generator.predict(image_batch_lr)
+    generated_image = denormalize(gen_img)
+
+    for index in range(examples):
+
+        plt.imshow(generated_image[index], interpolation='nearest')
+        plt.axis('off')
+
+        plt.tight_layout()
+        plt.savefig(output_dir + 'high_res_result_image_%d.png' % index)
+
diff --git 
a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Utils_model.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Utils_model.py new file mode 100644 index 0000000000000000000000000000000000000000..95811a21ed86c6acee21ee2080f61ff77e7ea549 --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/Utils_model.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +#title :Utils_model.py +#description :Have functions to get optimizer and loss +#author :Deepak Birla +#date :2018/10/30 +#usage :imported in other files +#python_version :3.5.4 + +from keras.applications.vgg19 import VGG19 +import keras.backend as K +from keras.models import Model +from keras.optimizers import Adam +import tensorflow as tf +from npu_bridge.npu_init import * +class VGG_LOSS(object): + + def __init__(self, image_shape): + + self.image_shape = image_shape + + # computes VGG loss or content loss + def vgg_loss(self, y_true, y_pred): + + vgg19 = VGG19(include_top=False, weights='imagenet', input_shape=self.image_shape) + vgg19.trainable = False + # Make trainable as False + for l in vgg19.layers: + l.trainable = False + model = Model(inputs=vgg19.input, outputs=vgg19.get_layer('block5_conv4').output) + model.trainable = False + + return K.mean(K.square(model(y_true) - model(y_pred))) + +def get_optimizer(): + # adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08) + opt = tf.train.AdamOptimizer(learning_rate=1E-4, beta1=0.9, beta2=0.999, epsilon=1e-08) + + # loss_scale_manager = FixedLossScaleManager(loss_scale=2**10) + loss_scale_manager = ExponentialUpdateLossScaleManager(init_loss_scale=2 ** 32, incr_every_n_steps=500, + decr_every_n_nan_or_inf=2, decr_ratio=0.5) + opt = NPULossScaleOptimizer(opt, loss_scale_manager) + + return opt + diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/VGGG.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/VGGG.py new file mode 100644 index 0000000000000000000000000000000000000000..c57a6ed9ddbeb42b0d38ce80150fc9a36bc30b68 --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/VGGG.py @@ -0,0 +1,228 @@ +"""VGG19 model for Keras. + +# Reference + +- [Very Deep Convolutional Networks for Large-Scale Image Recognition]( + https://arxiv.org/abs/1409.1556) (ICLR 2015) + +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from . import get_submodules_from_kwargs +from . import imagenet_utils +from .imagenet_utils import decode_predictions +from .imagenet_utils import _obtain_input_shape + +preprocess_input = imagenet_utils.preprocess_input + +WEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/' + 'releases/download/v0.1/' + 'vgg19_weights_tf_dim_ordering_tf_kernels.h5') +WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/' + 'releases/download/v0.1/' + 'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5') + + +def VGG19(include_top=True, + weights='imagenet', + input_tensor=None, + input_shape=None, + pooling=None, + classes=1000, + **kwargs): + """Instantiates the VGG19 architecture. + + Optionally loads weights pre-trained on ImageNet. + Note that the data format convention used by the model is + the one specified in your Keras config at `~/.keras/keras.json`. + + # Arguments + include_top: whether to include the 3 fully-connected + layers at the top of the network. + weights: one of `None` (random initialization), + 'imagenet' (pre-training on ImageNet), + or the path to the weights file to be loaded. 
+ input_tensor: optional Keras tensor + (i.e. output of `layers.Input()`) + to use as image input for the model. + input_shape: optional shape tuple, only to be specified + if `include_top` is False (otherwise the input shape + has to be `(224, 224, 3)` + (with `channels_last` data format) + or `(3, 224, 224)` (with `channels_first` data format). + It should have exactly 3 inputs channels, + and width and height should be no smaller than 32. + E.g. `(200, 200, 3)` would be one valid value. + pooling: Optional pooling mode for feature extraction + when `include_top` is `False`. + - `None` means that the output of the model will be + the 4D tensor output of the + last convolutional block. + - `avg` means that global average pooling + will be applied to the output of the + last convolutional block, and thus + the output of the model will be a 2D tensor. + - `max` means that global max pooling will + be applied. + classes: optional number of classes to classify images + into, only to be specified if `include_top` is True, and + if no `weights` argument is specified. + + # Returns + A Keras model instance. + + # Raises + ValueError: in case of invalid argument for `weights`, + or invalid input shape. + """ + backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs) + + if not (weights in {'imagenet', None} or os.path.exists(weights)): + raise ValueError('The `weights` argument should be either ' + '`None` (random initialization), `imagenet` ' + '(pre-training on ImageNet), ' + 'or the path to the weights file to be loaded.') + + if weights == 'imagenet' and include_top and classes != 1000: + raise ValueError('If using `weights` as `"imagenet"` with `include_top`' + ' as true, `classes` should be 1000') + # Determine proper input shape + input_shape = _obtain_input_shape(input_shape, + default_size=224, + min_size=32, + data_format=backend.image_data_format(), + require_flatten=include_top, + weights=weights) + + if input_tensor is None: + img_input = layers.Input(shape=input_shape) + else: + if not backend.is_keras_tensor(input_tensor): + img_input = layers.Input(tensor=input_tensor, shape=input_shape) + else: + img_input = input_tensor + # Block 1 + x = layers.Conv2D(64, (3, 3), + activation='relu', + padding='same', + name='block1_conv1')(img_input) + x = layers.Conv2D(64, (3, 3), + activation='relu', + padding='same', + name='block1_conv2')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) + + # Block 2 + x = layers.Conv2D(128, (3, 3), + activation='relu', + padding='same', + name='block2_conv1')(x) + x = layers.Conv2D(128, (3, 3), + activation='relu', + padding='same', + name='block2_conv2')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) + + # Block 3 + x = layers.Conv2D(256, (3, 3), + activation='relu', + padding='same', + name='block3_conv1')(x) + x = layers.Conv2D(256, (3, 3), + activation='relu', + padding='same', + name='block3_conv2')(x) + x = layers.Conv2D(256, (3, 3), + activation='relu', + padding='same', + name='block3_conv3')(x) + x = layers.Conv2D(256, (3, 3), + activation='relu', + padding='same', + name='block3_conv4')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) + + # Block 4 + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block4_conv1')(x) + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block4_conv2')(x) + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block4_conv3')(x) + x = 
layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block4_conv4')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) + + # Block 5 + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block5_conv1')(x) + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block5_conv2')(x) + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block5_conv3')(x) + x = layers.Conv2D(512, (3, 3), + activation='relu', + padding='same', + name='block5_conv4')(x) + x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) + + if include_top: + # Classification block + x = layers.Flatten(name='flatten')(x) + x = layers.Dense(4096, activation='relu', name='fc1')(x) + x = layers.Dense(4096, activation='relu', name='fc2')(x) + x = layers.Dense(classes, activation='softmax', name='predictions')(x) + else: + if pooling == 'avg': + x = layers.GlobalAveragePooling2D()(x) + elif pooling == 'max': + x = layers.GlobalMaxPooling2D()(x) + + # Ensure that the model takes into account + # any potential predecessors of `input_tensor`. + if input_tensor is not None: + inputs = keras_utils.get_source_inputs(input_tensor) + else: + inputs = img_input + # Create model. + model = models.Model(inputs, x, name='vgg19') + + # Load weights. + if weights == 'imagenet': + if include_top: + weights_path = keras_utils.get_file( + 'vgg19_weights_tf_dim_ordering_tf_kernels.h5', + WEIGHTS_PATH, + cache_subdir='models', + file_hash='cbe5617147190e668d6c5d5026f83318') + else: + weights_path = keras_utils.get_file( + 'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', + WEIGHTS_PATH_NO_TOP, + cache_subdir='models', + file_hash='253f8cb515780f3b799900260a226db6') + model.load_weights(weights_path) + if backend.backend() == 'theano': + keras_utils.convert_all_kernels_in_model(model) + elif weights is not None: + model.load_weights(weights) + + return model diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/fusion_switch.cfg b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/fusion_switch.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f21fffae8378d8d8feafbcda10198b6ae962234c --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/fusion_switch.cfg @@ -0,0 +1,10 @@ +{ +"Switch":{ +"GraphFusion":{ +"ALL":"off" +}, +"UBFusion":{ +"ALL":"off" +} +} +} \ No newline at end of file diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/modelarts_entry_acc.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/modelarts_entry_acc.py new file mode 100644 index 0000000000000000000000000000000000000000..e8fbc67d73d5ff797117e4d3dcb8f2a3aa13b10e --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/modelarts_entry_acc.py @@ -0,0 +1,65 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import argparse
+import sys
+
+#print(os.system("pip install keras==2.2.4"))
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--data_url", type=str, default="/home/ma-user/modelarts/inputs/data_url_0")
+parser.add_argument("--train_url", type=str, default="/home/ma-user/modelarts/outputs/train_url_0/")
+config = parser.parse_args()
+
+#print("[CANN-Modelzoo] code_dir path is [%s]" % (sys.path[0]))
+code_dir = sys.path[0]
+os.chdir(code_dir)
+#print("[CANN-Modelzoo] work_dir path is [%s]" % (os.getcwd()))
+
+#print("[CANN-Modelzoo] before train - list my run files:")
+os.system("ls -al /usr/local/Ascend/ascend-toolkit/")
+
+#print("[CANN-Modelzoo] before train - list my dataset files:")
+os.system("ls -al %s" % config.data_url)
+
+#print("[CANN-Modelzoo] start run train shell")
+
+os.system("dos2unix ./test/*")
+
+os.system("bash ./test/train_full_1p.sh --data_path=%s --output_path=%s " % (config.data_url, config.train_url))
+
+#print("[CANN-Modelzoo] finish run train shell")
+
+#print("[CANN-Modelzoo] after train - list my output files:")
+os.system("cp -r %s %s " % (code_dir, config.train_url))
+os.system("ls -al %s" % config.train_url)
diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/test.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4c56e421756f996f32902781b70d0bcf6d9b4ab
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/test.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+#title :test.py
+#description :to test the model
+#author :Deepak Birla
+#date :2018/10/30
+#usage :python test.py --options
+#python_version :3.5.4
+
+# Only the imports test.py actually uses; the unused matplotlib, skimage,
+# numpy and deprecated scipy.misc.imresize imports are not needed here.
+from keras.models import load_model
+import argparse
+
+import Utils, Utils_model
+from Utils_model import VGG_LOSS
+
+image_shape = (192,192,3)
+
+def test_model(input_hig_res, model, number_of_images, output_dir):
+
+    x_test_lr, x_test_hr = Utils.load_test_data_for_model(input_hig_res, 'jpg', number_of_images)
+    Utils.plot_test_generated_images_for_model(output_dir, model, x_test_hr, x_test_lr)
+
+def test_model_for_lr_images(input_low_res, model, number_of_images, output_dir):
+
+    x_test_lr = Utils.load_test_data(input_low_res, 'jpg', number_of_images)
+    Utils.plot_test_generated_images(output_dir, model, x_test_lr)
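+
+# Example usage (illustrative; the values shown are the argparse defaults below):
+#   python test.py -t test_model -ihr ./data/ -m ./model/gen_model3000.h5 -n 25
+#   python test.py -t test_lr_images -ilr ./data_lr/ -o ./output/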
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-ihr', '--input_hig_res', action='store', dest='input_hig_res', default='./data/',
+                        help='Path for high-resolution input images')
+
+    parser.add_argument('-ilr', '--input_low_res', action='store', dest='input_low_res', default='./data_lr/',
+                        help='Path for low-resolution input images')
+
+    parser.add_argument('-o', '--output_dir', action='store', dest='output_dir', default='./output/',
+                        help='Path for output images')
+
+    parser.add_argument('-m', '--model_dir', action='store', dest='model_dir', default='./model/gen_model3000.h5',
+                        help='Path for the trained model')
+
+    parser.add_argument('-n', '--number_of_images', action='store', dest='number_of_images', default=25,
+                        help='Number of images', type=int)
+
+    parser.add_argument('-t', '--test_type', action='store', dest='test_type', default='test_model',
+                        help="'test_model' to test on HR images, 'test_lr_images' to super-resolve LR images")
+
+    values = parser.parse_args()
+
+    loss = VGG_LOSS(image_shape)
+    model = load_model(values.model_dir, custom_objects={'vgg_loss': loss.vgg_loss})
+
+    if values.test_type == 'test_model':
+        test_model(values.input_hig_res, model, values.number_of_images, values.output_dir)
+
+    elif values.test_type == 'test_lr_images':
+        test_model_for_lr_images(values.input_low_res, model, values.number_of_images, values.output_dir)
+
+    else:
+        print("No such option")
diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/test/train_full_1p.sh b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/test/train_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..99285d0ccce3e4dbd8ab7c551c03af3c1f35014e
--- /dev/null
+++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/test/train_full_1p.sh
@@ -0,0 +1,225 @@
+#!/bin/bash
+
+##########################################################
+######### Lines 3 through 100: do NOT modify #############
+######### Lines 3 through 100: do NOT modify #############
+######### Lines 3 through 100: do NOT modify #############
+##########################################################
+# Directory containing this shell script
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# Check whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# Name of the network being trained
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# Initialize path parameters
+data_path=""
+output_path=""
+
+# Help message; do not modify
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_full_1p.sh <args>"
+    echo " "
+    echo "parameters:
+    --data_path              # dataset of training
+    --output_path            # output of training
+    --train_steps            # max_step for training
+    --train_epochs           # max_epoch for training
+    --batch_size             # batch size
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+# Parse arguments; do not modify
+for para in $*
+do
+    if [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --output_path* ]];then
+        output_path=`echo ${para#*=}`
+    elif [[ $para == --train_steps* ]];then
+        train_steps=`echo ${para#*=}`
+    elif [[ $para == --train_epochs* ]];then
+        train_epochs=`echo ${para#*=}`
+    elif [[ $para == --batch_size* ]];then
+        batch_size=`echo ${para#*=}`
+    fi
+done
+
+# Verify that data_path was provided; do not modify
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+
+# Default output_path if it was not provided; do not modify
+if [[ $output_path == "" ]];then
+    output_path="./test/output/${ASCEND_DEVICE_ID}"
+fi
+
+# Console log file name; keep this, the file name is ${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=${MODELARTS_MODEL_PATH}
+if [ x"${modelarts_flag}" != x ];
+then
+    echo "running on ModelArts..."
+    print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+    print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
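+
+# Illustrative invocation (added; the dataset path is a placeholder):
+#   bash ./test/train_full_1p.sh --data_path=/path/to/dataset --output_path=./test/output/0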
+
+CaseName=""
+function get_casename()
+{
+    if [ x"${perf_flag}" = x1 ];
+    then
+        CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+    else
+        CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+    fi
+}
+
+# Move into the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# Record the training start time; do not modify
+start_time=$(date +%s)
+##########################################################
+######### Lines 3 through 100: do NOT modify #############
+##########################################################
+
+#=========================================================
+#======== Training command: adjust for your network ======
+#=========================================================
+# Basic parameters; review and adjust for your model
+# The training dataset is under ${data_path}; use this variable directly
+# The training output directory is ${output_path}; use this variable directly
+# Other parameters may be added as needed, but keep batch_size and set it correctly
+pwd
+
+python -m pip install --upgrade pip
+
+pip install keras==2.2.4
+
+cp -r ./mat/* /usr/local/Ascend/ascend-toolkit/5.1.RC2.alpha001/arm64-linux/opp/op_impl/built-in/ai_core/tbe/impl/
+
+pip3 list | grep -i keras
+
+ls ./h5
+mkdir -p ~/.keras/models/
+cp -r ./h5/* ~/.keras/models/
+ls ~/.keras/models/
+
+#export ASCEND_GLOBAL_LOG_LEVEL=0
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+export LD_PRELOAD=/home/ma-user/miniconda3/envs/TensorFlow-1.15-arm/bin/../lib/libgomp.so.1:$LD_PRELOAD
+
+batch_size=16
+
+if [ x"${modelarts_flag}" != x ];
+then
+    python3.7 ./train.py \
+        --input_dir=${data_path} \
+        --output_dir=${output_path} \
+        --batch_size=16 \
+        --epochs=500 \
+        --number_of_images=8000 \
+        --train_test_ratio=0.8
+else
+    python3.7 ./train.py \
+        --input_dir=${data_path} \
+        --output_dir=${output_path} \
+        --batch_size=16 \
+        --epochs=500 \
+        --number_of_images=8000 \
+        --train_test_ratio=0.8 > ${print_log}
+fi
+
+# Performance metrics
+StepTime=`grep "sec/step :" ${print_log} | tail -n 10 | awk '{print $NF}' | awk '{sum+=$1} END {print sum/NR}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
+
+# Accuracy metrics
+train_accuracy=`grep "Final Accuracy accuracy" ${print_log} | awk '{print $NF}'`
+# Extract all loss lines
+grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+###########################################################
+######### Do not modify anything below this line ##########
+###########################################################
+
+# Check whether this run actually used the Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+    echo "------------------ ERROR NOTICE START ------------------"
+    echo "ERROR, your task did not use the Ascend NPU, please check your NPU migration."
+    echo "------------------ ERROR NOTICE END------------------"
+else
+    echo "------------------ INFO NOTICE START------------------"
+    echo "INFO, your task used the Ascend NPU, please check your result."
+    echo "------------------ INFO NOTICE END------------------"
+fi
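+
+# Note (added for clarity): get_casename() encodes network, batch size, rank count
+# and run type, e.g. "SRGAN_ID2087_for_Tensorflow_bs16_1p_acc" for this accuracy run.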
+ echo "------------------ INFO NOTICE END------------------" +fi + +# 获取最终的casename,请保留,case文件名为${CaseName} +get_casename + +# 重命名loss文件 +if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ]; +then + mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt +fi + +# 训练端到端耗时 +end_time=$(date +%s) +e2e_time=$(( $end_time - $start_time )) + +echo "------------------ Final result ------------------" +# 输出性能FPS/单step耗时/端到端耗时 +echo "Final Performance images/sec : $FPS" +echo "Final Performance sec/step : $StepTime" +echo "E2E Training Duration sec : $e2e_time" + +# 输出训练精度 +echo "Final Train Accuracy : ${train_accuracy}" + +# 最后一个迭代loss值,不需要修改 +ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`) + +#关键信息打印到${CaseName}.log中,不需要修改 +echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/train.py b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/train.py new file mode 100644 index 0000000000000000000000000000000000000000..9bd0708bcfab97661fcbabdd9ef4a667a2924123 --- /dev/null +++ b/TensorFlow/contrib/cv/SRGAN_ID2087_for_Tensorflow/train.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +#title :train.py +#description :to train the model +#author :Deepak Birla +#date :2018/10/30 +#usage :python train.py --options +#python_version :3.5.4 +# encoding: utf-8 + +import os +#print(os.system("pip install keras==2.2.4")) + +from Network import Generator, Discriminator +import Utils_model, Utils +from Utils_model import VGG_LOSS +import tensorflow as tf +import keras as keras +from keras import backend as K +from keras.models import Model +from keras.layers import Input +from tqdm import tqdm +import numpy as np +import argparse + +from npu_bridge.npu_init import * +sess_config=tf.ConfigProto() +custom_op =sess_config.graph_options.rewrite_options.custom_optimizers.add() +custom_op.name = "NpuOptimizer" +sess_config.graph_options.rewrite_options.remapping=RewriterConfig.OFF +sess_config.graph_options.rewrite_options.memory_optimization=RewriterConfig.OFF + +# custom_op.parameter_map["precision_mode"].s=tf.compat.as_bytes("force_fp32") +#custom_op.parameter_map["precision_mode"].s=tf.compat.as_bytes("allow_mix_precision") + + + +#import precision_tool.tf_config as npu_tf_config +#sess_config = npu_tf_config.session_dump_config(sess_config, action='overflow') +#npu_keras_sess = set_keras_session_npu_config(config=sess_config) + +#b +#import moxing as mox +#import precision_tool.config as CONFIG + + +# +#custom_op.parameter_map["fusion_switch_file"].s=tf.compat.as_bytes("/home/ma-user/modelarts/user-job-dir/code/fusion_switch.cfg") +sess=tf.Session(config=sess_config) +K.set_session(sess) + +np.random.seed(10) +# 
+
+np.random.seed(10)
+# Use a downscale factor of 4
+downscale_factor = 4
+# Change image_shape if your images have a different size
+image_shape = (192,192,3)
+
+
+# Combined network: generator followed by a frozen discriminator
+def get_gan_network(discriminator, shape, generator, optimizer, vgg_loss):
+    discriminator.trainable = False
+    gan_input = Input(shape=shape)
+    x = generator(gan_input)
+    gan_output = discriminator(x)
+    gan = Model(inputs=gan_input, outputs=[x, gan_output])
+    # Content (VGG) loss weighted 1.0, adversarial loss weighted 1e-3, as in the SRGAN paper
+    gan.compile(loss=[vgg_loss, "binary_crossentropy"],
+                loss_weights=[1., 1e-3],
+                optimizer=optimizer)
+
+    return gan
+
+# Default values are provided for all parameters; pass different values on the
+# command line if needed. For details, run: python train.py -h
+def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio):
+
+    x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(input_dir, '.jpg', number_of_images, train_test_ratio)
+
+    loss = VGG_LOSS(image_shape)
+
+    batch_count = int(x_train_hr.shape[0] / batch_size)
+    shape = (image_shape[0]//downscale_factor, image_shape[1]//downscale_factor, image_shape[2])
+
+    generator = Generator(shape).generator()
+    discriminator = Discriminator(image_shape).discriminator()
+
+    optimizer = Utils_model.get_optimizer()
+
+    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
+    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
+
+    gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
+
+    loss_file = open(model_save_dir + 'losses.txt', 'w+')
+    loss_file.close()
+
+    for e in range(1, epochs+1):
+        print ('-'*15, 'Epoch %d' % e, '-'*15)
+        for _ in tqdm(range(batch_count)):
+
+            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
+
+            image_batch_hr = x_train_hr[rand_nums]
+            image_batch_lr = x_train_lr[rand_nums]
+            generated_images_sr = generator.predict(image_batch_lr)
+
+            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2)
+            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
+            fake_data_Y = np.random.random_sample(batch_size)*0.2
+
+            discriminator.trainable = True
+
+            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
+            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
+            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
+
+            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
+
+            image_batch_hr = x_train_hr[rand_nums]
+            image_batch_lr = x_train_lr[rand_nums]
+
+            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
+            discriminator.trainable = False
+            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
+
+        print("discriminator_loss : %f" % discriminator_loss)
+        print("gan_loss :", gan_loss)
+        gan_loss = str(gan_loss)
+
+        loss_file = open(model_save_dir + 'losses.txt', 'a')
+        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' % (e, gan_loss, discriminator_loss))
+        loss_file.close()
+
+        if e % 2 == 0:
+            Utils.plot_generated_images(output_dir, e, generator, x_test_hr, x_test_lr)
+        if e % 500 == 0:
+            generator.save(model_save_dir + 'gen_model%d.h5' % e)
+            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
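+
+# Example invocation (illustrative; the values shown are the argparse defaults below):
+#   python train.py -i ./tra/ -o ./output/ -m ./model/ -b 16 -e 800 -n 10000 -r 0.8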
+
+if __name__ == "__main__":
+
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('-i', '--input_dir', action='store', dest='input_dir', default='./tra/',
+                        help='Path for input images')
+
+    parser.add_argument('-o', '--output_dir', action='store', dest='output_dir', default='./output/',
+                        help='Path for output images')
+
+    parser.add_argument('-m', '--model_save_dir', action='store', dest='model_save_dir', default='./model/',
+                        help='Path for saving the model')
+
+    parser.add_argument('-b', '--batch_size', action='store', dest='batch_size', default=16,
+                        help='Batch size', type=int)
+
+    parser.add_argument('-e', '--epochs', action='store', dest='epochs', default=800,
+                        help='Number of training epochs', type=int)
+
+    parser.add_argument('-n', '--number_of_images', action='store', dest='number_of_images', default=10000,
+                        help='Number of images', type=int)
+
+    parser.add_argument('-r', '--train_test_ratio', action='store', dest='train_test_ratio', default=0.8,
+                        help='Train/test split ratio', type=float)
+
+    values = parser.parse_args()
+
+    train(values.epochs, values.batch_size, values.input_dir, values.output_dir, values.model_save_dir, values.number_of_images, values.train_test_ratio)
+
+
+# Optional copy of overflow dumps to OBS via moxing (disabled):
+#FLAGS.obs_dir = "/home/ma-user/modelarts/outputs/train_url_0/"
+#obs_overflow_dir = os.path.join(FLAGS.obs_dir, 'overflow')
+#if not mox.file.exists(obs_overflow_dir):
+#    mox.file.make_dirs(obs_overflow_dir)
+#files = os.listdir(CONFIG.ROOT_DIR)
+#mox.file.copy_parallel(src_url=CONFIG.ROOT_DIR, dst_url=obs_overflow_dir)
+
+sess.close()
\ No newline at end of file