From 8e9ea7eea881095765e7286ab295e942ed1ba4cd Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Thu, 5 May 2022 14:37:52 +0000 Subject: [PATCH 01/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md. --- .../cv/Gitloss_ID1277_for_TensorFlow/README.md | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md index 92e845849..8fbe2fba0 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md @@ -99,7 +99,21 @@ steps: The train steps, default is 8000 | 平台| 性能 | |--|--| | GPU(V100)| 10ms/step | -| NPU(Ascend910)| 25.5ms/step | +| NPU(Ascend910)| 7.9ms/step | + +NPU性能详情 +``` +------------------ INFO NOTICE START------------------ +INFO, your task have used Ascend NPU, please check your result. +------------------ INFO NOTICE END------------------ +------------------ Final result ------------------ +Final Performance images/sec :16,202 image/sec 备注:128 batch +Final Performance sec/step : 0.0079 sec/step +E2E Training Duration sec : 136.11秒 (1000 step) +Final Train Accuracy : NA +备注:使用x86机器本地复现 +``` + #### 精度结果 ##### GPU结果 ``` -- Gitee From de6f53f3c185c934ea6ee22265ddc0ae300a2a53 Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 03:17:56 +0000 Subject: [PATCH 02/11] add TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/gitloss_perf.py. --- .../gitloss_perf.py | 315 ++++++++++++++++++ 1 file changed, 315 insertions(+) create mode 100644 TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/gitloss_perf.py diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/gitloss_perf.py b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/gitloss_perf.py new file mode 100644 index 000000000..ffceb51cc --- /dev/null +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/gitloss_perf.py @@ -0,0 +1,315 @@ +# MIT License +# +# Copyright (c) 2018 Kamran Janjua + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# ============================================================================ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from npu_bridge.npu_init import * + +import os +import numpy as np +import tensorflow as tf +import tflearn +from tensorflow.examples.tutorials.mnist import input_data +import matplotlib.pyplot as plt +import itertools, math +import pathlib +import tensorflow.contrib.layers as initializers +from scipy.spatial import distance +import time + +CENTER_LOSS_ALPHA = 0.5 +NUM_CLASSES = 10 +plt_range = 5 + +distArr = [] +avgArr = [] + +threshold = 0.4 +range_val = 2 +slim = tf.contrib.slim + +tf.app.flags.DEFINE_integer('update_centers', 1000, 'numbers of steps after which update the centers.') +tf.app.flags.DEFINE_float('lambda_c', 1.0, 'The weight of the center loss') +tf.app.flags.DEFINE_float('lambda_g', 1.0, 'The weight of the git loss') +tf.app.flags.DEFINE_integer('steps', 8000, 'The train steps') +tf.app.flags.DEFINE_string('exp_save_dir', "./test/output", 'The train save') +FLAGS = tf.app.flags.FLAGS + + +epoch = 0 +counter = 0 + + +def get_centers(feat_list, label_list): + centers_list = [] + for idx in range(10): + list_of_indices = [n for n, x in enumerate(label_list) if x == idx] + + items_of_class = [] + for item in list_of_indices: + got_feat = [float(i) for i in feat_list[item]] + items_of_class.append(got_feat) + + mean = np.mean(items_of_class, axis=0) + centers_list.append(mean) + return np.asarray(centers_list) + + +def get_intra_class_distance(feat_lst, label_lst, centers): + distances_list = [] + for idx in range(10): + list_of_indices = [n for n, x in enumerate(label_lst) if x == idx] + + list_for_class = [] + for item in list_of_indices: + got_feat = [float(i) for i in feat_lst[item]] + list_for_class.append(got_feat) + + distance_feat_from_center = [] + for item in list_for_class: + distance_feat_from_center.append(distance.euclidean(item, centers[idx])) + intraclass_distance = np.mean(distance_feat_from_center, axis=0) + distances_list.append(intraclass_distance) + return distances_list + + +with tf.name_scope('input'): + input_images = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name='input_images') + labels = tf.placeholder(tf.int64, shape=(None), name='labels') + +global_step = tf.Variable(0, trainable=False, name='global_step') + +def get_distances(features, labels, num_classes): + len_features = features.get_shape()[1] + centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32, + initializer=tf.constant_initializer(0), trainable=False) + labels = tf.reshape(labels, [-1]) + centers_batch = tf.gather(centers, labels) + + diff = centers_batch - features + unique_label, unique_idx, unique_count = tf.unique_with_counts(labels) + appear_times = tf.gather(unique_count, unique_idx) + appear_times = tf.reshape(appear_times, [-1, 1]) + + diff = tf.divide(diff, tf.cast((1 + appear_times), tf.float32)) + + return diff + + +def get_git_loss(features, labels, num_classes): + len_features = features.get_shape()[1] + centers = tf.get_variable('centers', [num_classes, len_features], dtype=tf.float32, + initializer=tf.constant_initializer(0), trainable=False) + labels = tf.reshape(labels, [-1]) + centers_batch = tf.gather(centers, labels) + 
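+    # Combined loss computed below: a "pull" term (mean squared distance
+    # between each feature and its class centre, as in centre loss) plus a
+    # "push" term built from the pairwise feature-vs-centre differences
+    # (diagonal entries masked out), fed elementwise through 1 / (1 + diff**2)
+    # so that better-separated classes contribute less; the two terms are
+    # weighted by lambda_c and lambda_g.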
+ loss = tf.reduce_mean(tf.square(features - centers_batch)) + + # Pairwise differences + diffs = (features[:, tf.newaxis] - centers_batch[tf.newaxis, :]) + diffs_shape = tf.shape(diffs) + + # Mask diagonal (where i == j) + mask = 1 - tf.eye(diffs_shape[0], diffs_shape[1], dtype=diffs.dtype) + diffs = diffs * mask[:, :, tf.newaxis] + + # combinaton of two losses + loss2 = tf.reduce_mean(tf.divide(1, 1 + tf.square(diffs))) + + diff = centers_batch - features + unique_label, unique_idx, unique_count = tf.unique_with_counts(labels) + appear_times = tf.gather(unique_count, unique_idx) + appear_times = tf.reshape(appear_times, [-1, 1]) + + diff = tf.divide(diff, tf.cast((1 + appear_times), tf.float32)) + diff = CENTER_LOSS_ALPHA * diff + + centers_update_op = tf.scatter_sub(centers, labels, diff) # diff is used to get updated centers. + + # combo_loss = value_factor * loss + new_factor * loss2 + combo_loss = FLAGS.lambda_c * loss + FLAGS.lambda_g * loss2 + + return combo_loss, centers_update_op + + +def inference(input_images): + with slim.arg_scope([slim.conv2d], kernel_size=3, padding='SAME'): + with slim.arg_scope([slim.max_pool2d], kernel_size=2): + x = slim.conv2d(input_images, num_outputs=32, weights_initializer=initializers.xavier_initializer(), + scope='conv1_1') + x = slim.conv2d(x, num_outputs=32, weights_initializer=initializers.xavier_initializer(), scope='conv1_2') + x = slim.max_pool2d(x, scope='pool1') + x = slim.conv2d(x, num_outputs=64, weights_initializer=initializers.xavier_initializer(), scope='conv2_1') + x = slim.conv2d(x, num_outputs=64, weights_initializer=initializers.xavier_initializer(), scope='conv2_2') + x = slim.max_pool2d(x, scope='pool2') + x = slim.conv2d(x, num_outputs=128, weights_initializer=initializers.xavier_initializer(), scope='conv3_1') + x = slim.conv2d(x, num_outputs=128, weights_initializer=initializers.xavier_initializer(), scope='conv3_2') + x = slim.max_pool2d(x, scope='pool3') + x = slim.flatten(x, scope='flatten') + feature = slim.fully_connected(x, num_outputs=2, activation_fn=None, scope='fc1') + x = tflearn.prelu(feature) + x = slim.fully_connected(x, num_outputs=10, activation_fn=None, scope='fc2') + return x, feature + + +def build_network(input_images, labels): + logits, features = inference(input_images) + + with tf.variable_scope('loss') as scope: + + with tf.name_scope('git_loss'): + git_loss, centers_update_op_int = get_git_loss(features, labels, NUM_CLASSES) + scope.reuse_variables() + with tf.name_scope('softmax_loss'): + softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)) + with tf.name_scope('total_loss'): + total_loss = softmax_loss + git_loss + + with tf.name_scope('acc'): + accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(logits, 1), labels), tf.float32)) + + with tf.name_scope('loss/'): + + tf.summary.scalar('SoftmaxLoss', softmax_loss) + tf.summary.scalar('TotalLoss', total_loss) + + with tf.name_scope('dist'): + distances_op = get_distances(features, labels, NUM_CLASSES) + + return logits, features, total_loss, accuracy, centers_update_op_int, distances_op # returns total loss + + + +logits, features, total_loss, accuracy, centers_update_op, distances_op = build_network(input_images, labels) +mnist = input_data.read_data_sets('./data/mnist', reshape=False) +optimizer = tf.train.AdamOptimizer(0.001) # learning rate. 
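+# minimize() only updates the trainable network weights; the non-trainable
+# `centers` variable is refreshed separately by running `centers_update_op`
+# together with `train_op` inside the training loop below.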
+train_op = optimizer.minimize(total_loss, global_step=global_step) + +summary_op = tf.summary.merge_all() +# sess = tf.Session(config=npu_config_proto()) +config = tf.ConfigProto() + +custom_op = config.graph_options.rewrite_options.custom_optimizers.add() +custom_op.name = "NpuOptimizer" +custom_op.parameter_map["use_off_line"].b = True +custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") +config.graph_options.rewrite_options.remapping = RewriterConfig.OFF +config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF + +sess = tf.Session(config=config) +sess.run(tf.global_variables_initializer()) + +mean_data = np.mean(mnist.train.images, axis=0) +step = sess.run(global_step) + 1 + + +exp_save_dir = FLAGS.exp_save_dir +pathlib.Path(exp_save_dir).mkdir(parents=True, exist_ok=True) +batch_size = 128 +intra_cls_dist = 0 +vali_acc = 0 +inter_cls_dist = 0 +with open(exp_save_dir + "/loss+perf_gpu.txt", "w") as text_file: + while step < FLAGS.steps: + + batch_images, batch_labels = mnist.train.next_batch(batch_size) +# print(batch_images.shape) + _, summary_str, train_acc, train_loss, updated_centers = sess.run( + [train_op, summary_op, accuracy, total_loss, centers_update_op], + feed_dict={ + input_images: batch_images - mean_data, + labels: batch_labels, + }) + + step += 1 + + if step % FLAGS.update_centers == 0: + + num_train_samples = mnist.train.num_examples + print('========num_train_samples=======',num_train_samples) + num_of_batches = num_train_samples // batch_size + print('========num_of_batches=======',num_of_batches) + centers = np.zeros([NUM_CLASSES, 2]) + all_features = [] + all_labels = [] + start_time = time.time() + for b in range(num_of_batches): + batch_images, batch_labels = mnist.train.next_batch(batch_size, shuffle=False) + feat2 = sess.run(features, feed_dict={input_images: batch_images - mean_data}) + all_features.extend(feat2) + all_labels.extend(batch_labels) + c = get_centers(feat2, batch_labels) + centers = np.sum(np.array([centers, c]), axis=0) + end_time = time.time() - start_time + + centers = centers / num_of_batches + + d = get_intra_class_distance(all_features, all_labels, centers) + # print(d) + intra_cls_dist = np.mean(np.asarray(d)) + # print("intra class distance %f" % intra_cls_dist) + + for i, j in itertools.combinations(centers, 2): + distance1 = math.sqrt(((i[0] - j[0]) ** 2) + ((i[1] - j[1]) ** 2)) + distArr.append(distance1) + inter_cls_dist = float(sum(distArr)) / len(distArr) + avgArr.append(inter_cls_dist) + # print("The average distance between two centers is: ", inter_cls_dist) + + # print(("Step: {}, Loss: {:.4f}".format(step, train_loss))) # prints training loss and steps. + epoch += 1 + # vali_image = mnist.validation.images - mean_data + + # vali_acc, vali_loss = sess.run( + # [accuracy, total_loss], + # feed_dict={ + # input_images: vali_image, + # labels: mnist.validation.labels + # }) + + + print(("Step: {}, Epoch: {}, Train_Loss: {:.4f} , Train_Acc: {:.4f} , inter_cls_dist: {:.4f} , intra_cls_dist: {:.4f} , train_time: {:.4f}". + format(step, epoch, train_loss, train_acc, inter_cls_dist, intra_cls_dist, end_time))) + + text_file.write( + ( + "Step:\t{}, Epoch: {}, Train_Loss:\t{:.4f}, Train_Acc:\t{:.4f}, inter_cls_dist:\t{:.4f}, intra_cls_dist:\t{:.4f}\n , train_time:\t{:.4f}\n". 
+ format(step, epoch, train_loss, train_acc, inter_cls_dist, intra_cls_dist, end_time))) + + + if step == FLAGS.steps - 1: + tf.train.Saver().save(sess, "ckpt_npu/model.ckpt") + tf.io.write_graph(sess.graph, './ckpt_npu', 'graph.pbtxt', as_text=True) + + -- Gitee From fbe4daf9cda743bc66717befccce7cd3af3c1181 Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 03:19:42 +0000 Subject: [PATCH 03/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh. --- .../test/train_performance_1p.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh index 3732084c6..ddbe91109 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh @@ -123,16 +123,16 @@ do #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 #--data_dir, --model_dir, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path - sed -i "s|"./data/mnist"|"${data_path}"|g" gitloss.py + sed -i "s|"./data/mnist"|"${data_path}"|g" gitloss_pref.py - python3 gitloss.py \ - --update_centers=10 \ + python3 gitloss_pref.py \ + --update_centers=1000 \ --lambda_c=1.0 \ --lambda_g=1.0 \ --steps=100 \ --exp_save_dir $cur_path/test/output/${ASCEND_DEVICE_ID} > $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 - sed -i "s|"${data_path}"|"./data/mnist"|g" gitloss.py + sed -i "s|"${data_path}"|"./data/mnist"|g" gitloss_pref.py -- Gitee From dc5f45aa23539727b1be52dffd10ff859d4f8a06 Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 03:20:21 +0000 Subject: [PATCH 04/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh. 
--- .../test/train_performance_1p.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh index ddbe91109..da79c7afa 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh @@ -123,16 +123,16 @@ do #执行训练脚本,以下传参不需要修改,其他需要模型审视修改 #--data_dir, --model_dir, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path - sed -i "s|"./data/mnist"|"${data_path}"|g" gitloss_pref.py + sed -i "s|"./data/mnist"|"${data_path}"|g" gitloss_perf.py - python3 gitloss_pref.py \ + python3 gitloss_perf.py \ --update_centers=1000 \ --lambda_c=1.0 \ --lambda_g=1.0 \ --steps=100 \ --exp_save_dir $cur_path/test/output/${ASCEND_DEVICE_ID} > $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 - sed -i "s|"${data_path}"|"./data/mnist"|g" gitloss_pref.py + sed -i "s|"${data_path}"|"./data/mnist"|g" gitloss_perf.py -- Gitee From fbaf50082fd6c603abf1ffe0f72922ae999006ce Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 03:20:46 +0000 Subject: [PATCH 05/11] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20Te?= =?UTF-8?q?nsorFlow/contrib/cv/Gitloss=5FID1277=5Ffor=5FTensorFlow/test/ru?= =?UTF-8?q?n=5F1p.sh?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh deleted file mode 100644 index 1aa948d34..000000000 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh +++ /dev/null @@ -1,8 +0,0 @@ -#/bin/bash - -# source activate /home/ma-user/miniconda3/envs/TensorFlow-1.15.0 -# pip install tflearn - -cd ../ - -python3 gitloss.py --update_centers=1000 --lambda_c=1.0 --lambda_g=1.0 --steps=8000 \ No newline at end of file -- Gitee From c7f3126371e4410a5217da70de971589af2e2aca Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 03:21:50 +0000 Subject: [PATCH 06/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh. --- .../Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh index da79c7afa..634537252 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh @@ -16,6 +16,7 @@ ckpt_path='' #设置默认日志级别,不需要修改 export ASCEND_GLOBAL_LOG_LEVEL=3 +export ASCEND_GLOBAL_EVENT_ENABLE=0 #export ASCEND_DEVICE_ID=3 #基础参数,需要模型审视修改 -- Gitee From 8356906aa259368f13bcf3b8645cc7a554dd591e Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 04:21:46 +0000 Subject: [PATCH 07/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md. 
--- .../Gitloss_ID1277_for_TensorFlow/README.md | 30 ++++++++++--------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md index 8fbe2fba0..45b795875 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md @@ -76,13 +76,13 @@ GitLoss ### Run command #### Use bash ``` -bash ./test/run_1p.sh -``` -#### Run directly +1. train_full_1p +bash ./test/train_full_1p.sh +2. train_performance_1p +bash ./test/train_performance_1p.sh ``` -python gitloss.py -``` + 参数注释: ``` update_centers: numbers of steps after which update the centers, default is 1000 @@ -99,19 +99,21 @@ steps: The train steps, default is 8000 | 平台| 性能 | |--|--| | GPU(V100)| 10ms/step | +| GPU(V100)| 1.7013s/epoch | | NPU(Ascend910)| 7.9ms/step | +| NPU(Ascend910)| 1.79s/epoch | NPU性能详情 ``` ------------------- INFO NOTICE START------------------ -INFO, your task have used Ascend NPU, please check your result. ------------------- INFO NOTICE END------------------ ------------------- Final result ------------------ -Final Performance images/sec :16,202 image/sec 备注:128 batch -Final Performance sec/step : 0.0079 sec/step -E2E Training Duration sec : 136.11秒 (1000 step) -Final Train Accuracy : NA -备注:使用x86机器本地复现 +------------------ INFO NOTICE START------------------ +INFO, your task have used Ascend NPU, please check your result. +------------------ INFO NOTICE END------------------ +------------------ Final result ------------------ +Final Performance images/sec :16,202 image/sec 备注:128 batch +Final Performance sec/step : 0.0079 sec/step +E2E Training Duration sec : 136.11秒 (1000 step) +Final Train Accuracy : NA +备注:使用x86机器本地复现 ``` #### 精度结果 -- Gitee From 20bbd880834914f45cc75abfeb340aaa56c86008 Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 04:25:42 +0000 Subject: [PATCH 08/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md. --- .../contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md index 45b795875..8d527a35d 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md @@ -68,8 +68,10 @@ GitLoss ├─data 存放数据集文件夹 ├─test ├─output 存放模型运行日志文件夹 - ├─run_1p.sh 代码运行脚本 - ├─gitloss.py 模型定义及主函数 + ├─train_full_1p.sh 训练及验证,验证精度 + ├─train_performance_1p.sh 仅训练,验证性能 + ├─gitloss.py 模型定义及主函数(训练及验证) + ├─gitloss_perf.py 模型定义及主函数(仅训练) ``` ## Running the code -- Gitee From cb78d2ebee387eb2512f48a0cce76ef1fcdac254 Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 04:26:00 +0000 Subject: [PATCH 09/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md. 
--- TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md index 8d527a35d..ee089556e 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/README.md @@ -69,7 +69,7 @@ GitLoss ├─test ├─output 存放模型运行日志文件夹 ├─train_full_1p.sh 训练及验证,验证精度 - ├─train_performance_1p.sh 仅训练,验证性能 + ├─train_performance_1p.sh 仅训练,验证性能 ├─gitloss.py 模型定义及主函数(训练及验证) ├─gitloss_perf.py 模型定义及主函数(仅训练) ``` -- Gitee From 75a6974d82cb81a3f6adae18755fff0756f7b90f Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 04:28:28 +0000 Subject: [PATCH 10/11] update TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh. --- .../Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh index 634537252..b6fe923d5 100644 --- a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/train_performance_1p.sh @@ -130,7 +130,7 @@ do --update_centers=1000 \ --lambda_c=1.0 \ --lambda_g=1.0 \ - --steps=100 \ + --steps=8000 \ --exp_save_dir $cur_path/test/output/${ASCEND_DEVICE_ID} > $cur_path/test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 sed -i "s|"${data_path}"|"./data/mnist"|g" gitloss_perf.py -- Gitee From 165a87ed1a00c40efed83a0e89a6db4d761f9cb7 Mon Sep 17 00:00:00 2001 From: youren <1430728744@qq.com> Date: Fri, 6 May 2022 04:40:31 +0000 Subject: [PATCH 11/11] add TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh. --- .../cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh diff --git a/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh new file mode 100644 index 000000000..76a6a2ec8 --- /dev/null +++ b/TensorFlow/contrib/cv/Gitloss_ID1277_for_TensorFlow/test/run_1p.sh @@ -0,0 +1,8 @@ +#/bin/bash + +# source activate /home/ma-user/miniconda3/envs/TensorFlow-1.15.0 +# pip install tflearn + +cd ../ + +python gitloss.py --update_centers=1000 --lambda_c=1.0 --lambda_g=1.0 --steps=8000 \ No newline at end of file -- Gitee
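
As a quick sanity check outside the Ascend environment, the snippet below is a minimal NumPy sketch of the combined loss computed by `get_git_loss` in `gitloss_perf.py` (the centre-update step is omitted). The helper and variable names, toy features, labels and centres are illustrative assumptions, not part of the submitted code; the last line only cross-checks the README throughput arithmetic (128-image batch at 0.0079 s/step).

```python
import numpy as np

def git_loss_reference(features, labels, centers, lambda_c=1.0, lambda_g=1.0):
    # "Pull" term: mean squared distance between each feature and its class centre.
    centers_batch = centers[labels]                              # (B, D)
    pull = np.mean((features - centers_batch) ** 2)
    # "Push" term: pairwise feature-vs-centre differences, diagonal entries
    # (each sample against its own centre) zeroed out, fed elementwise through
    # 1 / (1 + diff**2) so that better-separated classes contribute less.
    diffs = features[:, None, :] - centers_batch[None, :, :]     # (B, B, D)
    diffs = diffs * (1.0 - np.eye(len(features)))[:, :, None]
    push = np.mean(1.0 / (1.0 + diffs ** 2))
    return lambda_c * pull + lambda_g * push

# Toy 2-D features for three samples from two classes (illustrative values only).
feats = np.array([[0.5, 0.2], [0.4, 0.1], [-0.3, 0.9]])
labs = np.array([0, 0, 1])
cents = np.array([[0.45, 0.15], [-0.3, 0.9]])
print("combined loss:", git_loss_reference(feats, labs, cents))

# README throughput cross-check: 128 images / 0.0079 s per step ≈ 16,202 images/sec.
print("images/sec:", 128 / 0.0079)
```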