From a1da9ed3c77d56aec0c4dd902eadc60cf68099c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=B4=8B=E6=B4=8B?= <584244991@qq.com>
Date: Tue, 13 Dec 2022 03:31:25 +0000
Subject: [PATCH 1/4] update TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_full_1p.sh.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 张洋洋 <584244991@qq.com>
---
 .../test/train_full_1p.sh | 62 ++++++++++++++-----
 1 file changed, 47 insertions(+), 15 deletions(-)

diff --git a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_full_1p.sh b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_full_1p.sh
index 0f0a73eb8..f16ec2199 100644
--- a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_full_1p.sh
+++ b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_full_1p.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
 ##########################################################
-#########第3行 至 90行,请一定不要、不要、不要修改##########
-#########第3行 至 90行,请一定不要、不要、不要修改##########
-#########第3行 至 90行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
 ##########################################################
 # shell脚本所在路径
 cur_path=`echo $(cd $(dirname $0);pwd)`
@@ -30,7 +30,7 @@ if [[ $1 == --help || $1 == -h ]];then
     --data_path              # dataset of training
     --output_path            # output of training
     --train_steps            # max_step for training
-    --train_epochs                   # max_epoch for training
+    --train_epochs           # max_epoch for training
     --batch_size             # batch size
     -h/--help                show help message
     "
@@ -64,6 +64,17 @@
 if [[ $output_path == "" ]];then
     output_path="./test/output/${ASCEND_DEVICE_ID}"
 fi
 
+# 设置打屏日志文件名,请保留,文件名为${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=${MODELARTS_MODEL_PATH}
+if [ x"${modelarts_flag}" != x ];
+then
+    echo "running without etp..."
+    print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+    print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
 CaseName=""
 function get_casename()
 {
@@ -83,9 +94,9 @@ mkdir -p ./test/output/${ASCEND_DEVICE_ID}
 # 训练开始时间记录,不需要修改
 start_time=$(date +%s)
 ##########################################################
-#########第3行 至 90行,请一定不要、不要、不要修改##########
-#########第3行 至 90行,请一定不要、不要、不要修改##########
-#########第3行 至 90行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
 ##########################################################
 
 #=========================================================
@@ -99,19 +110,27 @@ start_time=$(date +%s)
 # 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值
 batch_size=128
 
-# 设置打屏日志文件名,请保留,文件名为${print_log}
-print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
-
-python3.7 ./train.py --data_url=${data_path} --train_url=${output_path} 1>${print_log} 2>&1
+python3.7 ./train.py \
+    --data_url=${data_path}/dataset \
+    --train_url=${output_path} \
+    --train1=${data_path}/dataset/iwslt2016/segmented/train.de.bpe \
+    --train2=${data_path}/dataset/iwslt2016/segmented/train.en.bpe \
+    --eval1=${data_path}/dataset/iwslt2016/segmented/eval.de.bpe \
+    --eval2=${data_path}/dataset/iwslt2016/segmented/eval.en.bpe \
+    --eval3=${data_path}/dataset/iwslt2016/prepro/eval.en \
+    --vocab=${data_path}/dataset/iwslt2016/segmented/bpe.vocab \
+    --num_epochs=1 \
+    --test1=${data_path}/dataset/iwslt2016/segmented/test.de.bpe \
+    --test2=${data_path}/dataset/iwslt2016/prepro/test.en 1>${print_log} 2>&1
 
 # 性能相关数据计算
-StepTime=`grep "sec/step :" ${print_log} | tail -n 10 | awk '{print $NF}' | awk '{sum+=$1} END {print sum/NR}'`
+StepTime=`grep "s/it" ${print_log} | awk 'END {print $7}' | tr -d "s/it]"`
 FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
 
 # 精度相关数据计算
-train_accuracy=`grep "Final Accuracy accuracy" ${print_log} | awk '{print $NF}'`
+train_accuracy=`grep "loss:" ${print_log} | awk -F "loss: " '{print $2}' | awk 'END {print $NF}'`
 # 提取所有loss打印信息
-grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+grep "loss:" ${print_log} | awk -F "loss: " '{print $2}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
 
 ###########################################################
 #########后面的所有内容请不要修改###########################
@@ -119,6 +138,19 @@ grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' >
 #########后面的所有内容请不要修改###########################
 ###########################################################
 
+# 判断本次执行是否正确使用Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+    echo "------------------ ERROR NOTICE START ------------------"
+    echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration."
+    echo "------------------ ERROR NOTICE END------------------"
+else
+    echo "------------------ INFO NOTICE START------------------"
+    echo "INFO, your task have used Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------" +fi + # 获取最终的casename,请保留,case文件名为${CaseName} get_casename @@ -142,7 +174,7 @@ echo "E2E Training Duration sec : $e2e_time" echo "Final Train Accuracy : ${train_accuracy}" # 最后一个迭代loss值,不需要修改 -ActualLoss=(`awk 'END {print $NF}' ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt`) +ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`) #关键信息打印到${CaseName}.log中,不需要修改 echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -- Gitee From b73d356c21d5e2cde336e0db47d6c5f8100cbbe5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=B4=8B=E6=B4=8B?= <584244991@qq.com> Date: Tue, 13 Dec 2022 03:32:23 +0000 Subject: [PATCH 2/4] update TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_performance_1p.sh. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 张洋洋 <584244991@qq.com> --- .../test/train_performance_1p.sh | 67 ++++++++++++++----- 1 file changed, 49 insertions(+), 18 deletions(-) diff --git a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_performance_1p.sh index e86eb215b..f16ec2199 100644 --- a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/test/train_performance_1p.sh @@ -1,9 +1,9 @@ #!/bin/bash ########################################################## -#########第3行 至 90行,请一定不要、不要、不要修改########## -#########第3行 至 90行,请一定不要、不要、不要修改########## -#########第3行 至 90行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## +#########第3行 至 100行,请一定不要、不要、不要修改########## ########################################################## # shell脚本所在路径 cur_path=`echo $(cd $(dirname $0);pwd)` @@ -30,7 +30,7 @@ if [[ $1 == --help || $1 == -h ]];then --data_path # dataset of training --output_path # output of training --train_steps # max_step for training - --train_epochs # max_epoch for training + --train_epochs # max_epoch for training --batch_size # batch size -h/--help show help message " @@ -64,6 +64,17 @@ if [[ $output_path == "" ]];then output_path="./test/output/${ASCEND_DEVICE_ID}" fi +# 设置打屏日志文件名,请保留,文件名为${print_log} +print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log" +modelarts_flag=${MODELARTS_MODEL_PATH} +if [ x"${modelarts_flag}" != x ]; +then + echo "running without etp..." 
+    print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+    print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
 CaseName=""
 function get_casename()
 {
@@ -83,9 +94,9 @@ mkdir -p ./test/output/${ASCEND_DEVICE_ID}
 # 训练开始时间记录,不需要修改
 start_time=$(date +%s)
 ##########################################################
-#########第3行 至 90行,请一定不要、不要、不要修改##########
-#########第3行 至 90行,请一定不要、不要、不要修改##########
-#########第3行 至 90行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
+#########第3行 至 100行,请一定不要、不要、不要修改##########
 ##########################################################
 
 #=========================================================
@@ -97,23 +108,29 @@ start_time=$(date +%s)
 # 您的训练数据集在${data_path}路径下,请直接使用这个变量获取
 # 您的训练输出目录在${output_path}路径下,请直接使用这个变量获取
 # 您的其他基础参数,可以自定义增加,但是batch_size请保留,并且设置正确的值
-train_epochs=1
-train_steps=34632
 batch_size=128
 
-print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
-python3.7 ./train.py --data_url=${data_path} --train_url=${output_path} --steps=${train_steps} 1>${print_log} 2>&1
-
+python3.7 ./train.py \
+    --data_url=${data_path}/dataset \
+    --train_url=${output_path} \
+    --train1=${data_path}/dataset/iwslt2016/segmented/train.de.bpe \
+    --train2=${data_path}/dataset/iwslt2016/segmented/train.en.bpe \
+    --eval1=${data_path}/dataset/iwslt2016/segmented/eval.de.bpe \
+    --eval2=${data_path}/dataset/iwslt2016/segmented/eval.en.bpe \
+    --eval3=${data_path}/dataset/iwslt2016/prepro/eval.en \
+    --vocab=${data_path}/dataset/iwslt2016/segmented/bpe.vocab \
+    --num_epochs=1 \
+    --test1=${data_path}/dataset/iwslt2016/segmented/test.de.bpe \
+    --test2=${data_path}/dataset/iwslt2016/prepro/test.en 1>${print_log} 2>&1
 
 # 性能相关数据计算
-StepTime=`grep "sec/step :" ${print_log} | tail -n 10 | awk '{print $NF}' | awk '{sum+=$1} END {print sum/NR}'`
+StepTime=`grep "s/it" ${print_log} | awk 'END {print $7}' | tr -d "s/it]"`
 FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}'}'`
 
 # 精度相关数据计算
-train_accuracy=`grep "Final Accuracy accuracy" ${print_log} | awk '{print $NF}'`
+train_accuracy=`grep "loss:" ${print_log} | awk -F "loss: " '{print $2}' | awk 'END {print $NF}'`
 # 提取所有loss打印信息
-grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
-
+grep "loss:" ${print_log} | awk -F "loss: " '{print $2}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
 
 ###########################################################
 #########后面的所有内容请不要修改###########################
@@ -121,6 +138,19 @@ grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' >
 #########后面的所有内容请不要修改###########################
 ###########################################################
 
+# 判断本次执行是否正确使用Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+    echo "------------------ ERROR NOTICE START ------------------"
+    echo "ERROR, your task haven't used Ascend NPU, please check your npu Migration."
+    echo "------------------ ERROR NOTICE END------------------"
+else
+    echo "------------------ INFO NOTICE START------------------"
+    echo "INFO, your task have used Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------" +fi + # 获取最终的casename,请保留,case文件名为${CaseName} get_casename @@ -144,7 +174,7 @@ echo "E2E Training Duration sec : $e2e_time" echo "Final Train Accuracy : ${train_accuracy}" # 最后一个迭代loss值,不需要修改 -ActualLoss=(`awk 'END {print $NF}' ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt`) +ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`) #关键信息打印到${CaseName}.log中,不需要修改 echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log @@ -155,4 +185,5 @@ echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}. echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From 80ff9e13f19459abb7822ebda540fa12e7c7a6e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=B4=8B=E6=B4=8B?= <584244991@qq.com> Date: Tue, 13 Dec 2022 03:38:24 +0000 Subject: [PATCH 3/4] update TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/train.py. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 张洋洋 <584244991@qq.com> --- .../train.py | 34 +++++-------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/train.py b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/train.py index c85005aa7..e60b729fe 100644 --- a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/train.py +++ b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/train.py @@ -56,25 +56,21 @@ logging.info("# hparams") hparams = Hparams() parser = hparams.parser hp = parser.parse_args() -# hp.data_url=os.path.dirname(os.path.realpath(__file__))+'/' print("data_url\n") print(hp.data_url) print("train_url\n") print(hp.train_url) +print(hp.logdir) save_hparams(hp, hp.logdir) logging.info("# Prepare train/eval batches") -train_batches, num_train_batches, num_train_samples = get_batch(hp.data_url+ hp.train1, hp.data_url+hp.train2, +train_batches, num_train_batches, num_train_samples = get_batch(hp.train1, hp.train2, hp.maxlen1, hp.maxlen2, - hp.data_url+hp.vocab, hp.batch_size, + hp.vocab, hp.batch_size, shuffle=True) -# eval_batches, num_eval_batches, num_eval_samples = get_batch(hp.data_url+hp.eval1, hp.data_url+hp.eval2, -# 100000, 100000, -# hp.data_url+hp.vocab, hp.batch_size, -# shuffle=False) -eval_batches, num_eval_batches, num_eval_samples = get_batch(hp.data_url+hp.eval1, hp.data_url+hp.eval2, +eval_batches, num_eval_batches, num_eval_samples = get_batch(hp.eval1, hp.eval2, hp.maxlen1, hp.maxlen2, - hp.data_url+hp.vocab, hp.batch_size, + hp.vocab, hp.batch_size, shuffle=False) # create a iterator of the correct shape and type @@ -93,15 +89,6 @@ y_hat = m.eval(xs, ys) logging.info("# Session") saver = tf.train.Saver(max_to_keep=hp.num_epochs) -####相关设置开关 -if not os.path.exists(hp.train_url + "/tmp/profiling"): os.makedirs(hp.train_url + "/tmp/profiling") -proPath=hp.train_url + "/tmp/profiling" -dumpPath=hp.train_url + 
"/tmp/overflow" -blackPath=hp.data_url+"/ops_info.json" -fusionPath=hp.data_url+"/fusion_switch.cfg" -switchPath=hp.data_url+"/switch_config.txt" -if not os.path.exists(dumpPath): os.makedirs(dumpPath) - config = tf.ConfigProto() custom_op = config.graph_options.rewrite_options.custom_optimizers.add() custom_op.name = "NpuOptimizer" @@ -110,7 +97,7 @@ config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 必须显 config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF # 必须显式关闭 with tf.Session(config=config) as sess: - ckpt = tf.train.latest_checkpoint(hp.train_url+hp.logdir) + ckpt = tf.train.latest_checkpoint(hp.train_url + hp.logdir) if ckpt is None: logging.info("Initializing from scratch") sess.run(tf.global_variables_initializer()) @@ -147,10 +134,7 @@ with tf.Session(config=config) as sess: model_output = "iwslt2016_E%02dL%.2f" % (epoch, _loss) if not os.path.exists(hp.train_url+hp.evaldir): os.makedirs(hp.train_url+hp.evaldir) translation = os.path.join(hp.train_url + hp.evaldir, model_output) - # if not os.path.exists(hp.train_url + hp.evaldir): os.makedirs(hp.train_url + hp.evaldir) - # translation = os.path.join(hp.train_url+hp.evaldir, model_output) - ####### with open(translation, 'w') as fout: fout.write("\n".join(hypotheses)) @@ -159,13 +143,11 @@ with tf.Session(config=config) as sess: logging.info("# save models") ckpt_name = os.path.join(hp.train_url+hp.logdir, model_output) - # if not os.path.exists(hp.train_url + hp.logdir): os.makedirs(hp.train_url + hp.logdir) - # ckpt_name = os.path.join(hp.train_url + hp.logdir, model_output) - ######## + saver.save(sess, ckpt_name, global_step=_gs) logging.info("after training of {} epochs, {} has been saved.".format(epoch, ckpt_name)) logging.info("# fall back to train mode") sess.run(train_init_op) -logging.info("Done") +logging.info("Done") \ No newline at end of file -- Gitee From 4a48fb4a7973eadac59e8781c5fc1301ff4d0bc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E6=B4=8B=E6=B4=8B?= <584244991@qq.com> Date: Tue, 13 Dec 2022 03:41:45 +0000 Subject: [PATCH 4/4] update TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/model.py. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 张洋洋 <584244991@qq.com> --- .../model.py | 39 +------------------ 1 file changed, 2 insertions(+), 37 deletions(-) diff --git a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/model.py b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/model.py index 3fe80d8bd..8259a033d 100644 --- a/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/model.py +++ b/TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow/model.py @@ -63,7 +63,7 @@ class Transformer: ''' def __init__(self, hp): self.hp = hp - self.token2idx, self.idx2token = load_vocab(hp.data_url+hp.vocab) + self.token2idx, self.idx2token = load_vocab(hp.vocab) self.embeddings = get_token_embeddings(self.hp.vocab_size, self.hp.d_model, zero_pad=True) def encode(self, xs, training=True): @@ -80,8 +80,6 @@ class Transformer: enc *= self.hp.d_model**0.5 # scale enc += positional_encoding(enc, self.hp.maxlen1) - #enc = tf.layers.dropout(enc, self.hp.dropout_rate, training=training) - #enc = npu_convert_dropout(enc, self.hp.dropout_rate, training=training) ## Blocks for i in range(self.hp.num_blocks): @@ -122,8 +120,6 @@ class Transformer: dec *= self.hp.d_model ** 0.5 # scale dec += positional_encoding(dec, self.hp.maxlen2) - #dec = tf.layers.dropout(dec, self.hp.dropout_rate, training=training) - #dec = npu_convert_dropout(dec, self.hp.dropout_rate, training=training) # Blocks for i in range(self.hp.num_blocks): @@ -191,25 +187,6 @@ class Transformer: # train_op= train_op.minimize(loss, global_step=global_step) - - # self.refine_optim = tf.train.AdamOptimizer(learning_rate=self.refine_lr).minimize(self.rec_loss, - # var_list=refine_var) - # #### - # self.refine_optim = tf.train.AdamOptimizer(learning_rate=self.refine_lr) - # - # loss_scale_opt = self.refine_optim - # loss_scale_manager = ExponentialUpdateLossScaleManager(init_loss_scale=2 ** 32, incr_every_n_steps=1000, - # decr_every_n_nan_or_inf=2, decr_ratio=0.5) - # self.refine_optim = NPULossScaleOptimizer(loss_scale_opt, loss_scale_manager) - # - # self.refine_optim = self.refine_optim.minimize(self.rec_loss, var_list=refine_var) - - # tf.summary.scalar('lr', lr) - # tf.summary.scalar("loss", loss) - # tf.summary.scalar("global_step", global_step) - # - # summaries = tf.summary.merge_all() - return loss, train_op, global_step #return loss, train_op, global_step @@ -235,16 +212,4 @@ class Transformer: _decoder_inputs = tf.concat((decoder_inputs, y_hat), 1) ys = (_decoder_inputs, y, y_seqlen, sents2) - # monitor a random sample - #n = tf.random_uniform((), 0, tf.shape(y_hat)[0]-1, tf.int32) - # sent1 = sents1[n] - # pred = convert_idx_to_token_tensor(y_hat[n], self.idx2token) - # sent2 = sents2[n] - - # tf.summary.text("sent1", sent1) - # tf.summary.text("pred", pred) - # tf.summary.text("sent2", sent2) - # summaries = tf.summary.merge_all() - - return y_hat - + return y_hat \ No newline at end of file -- Gitee
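
Reviewer note: a minimal usage sketch for the series, not part of the patches themselves. The patch file names and the /data/transformer directory below are illustrative assumptions; the flags come from the --help text in the test scripts, and the required dataset layout from the new train.py arguments (--train1/--eval1/--vocab all rooted at ${data_path}/dataset/iwslt2016).

    # Hypothetical file names as produced by `git format-patch`; adjust to the real ones.
    git am 0001-*.patch 0002-*.patch 0003-*.patch 0004-*.patch
    cd TensorFlow/contrib/nlp/Transformer_ID2361__for_TensorFlow
    # /data/transformer is a placeholder; it must contain
    # dataset/iwslt2016/segmented and dataset/iwslt2016/prepro.
    bash ./test/train_full_1p.sh --data_path=/data/transformer --output_path=./test/output/${ASCEND_DEVICE_ID}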