diff --git a/TensorFlow/built-in/cv/detection/SSD-Resnet50V1-FPN_ID1463_for_TensorFlow/test/train_RT2_full_8p_inceptionv2.sh b/TensorFlow/built-in/cv/detection/SSD-Resnet50V1-FPN_ID1463_for_TensorFlow/test/train_RT2_full_8p_inceptionv2.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7340a8251614a66f5692f51bdecdf13818ae62c9
--- /dev/null
+++ b/TensorFlow/built-in/cv/detection/SSD-Resnet50V1-FPN_ID1463_for_TensorFlow/test/train_RT2_full_8p_inceptionv2.sh
@@ -0,0 +1,142 @@
+#!/bin/bash
+cur_path=`pwd`
+export PYTHONPATH=$cur_path/../models/research:$cur_path/../models/research/slim:$PYTHONPATH
+# Collective communication
+export RANK_SIZE=8
+export RANK_TABLE_FILE=$cur_path/../configs/${RANK_SIZE}p.json
+export JOB_ID=10087
+RANK_ID_START=0
+ASCEND_DEVICE_ID_START=0
+
+# Dataset parameters
+data_path="/data"
+
+# Training parameters, modify per model
+Network="SSD-InceptionV2_ID0510_for_TensorFlow"
+num_train_steps=50000
+batch_size=24
+ckpt_path=/checkpoints
+pipeline_config=$cur_path/../models/research/configs/ssd_inception_v2_coco_8p.config
+
+# Help message, modify per network
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_RT2_full_8p_inceptionv2.sh "
+
+    echo ""
+    echo "parameter explain:
+    --num_train_steps    training steps
+    --data_path          source data of training
+    --ckpt_path          pre-checkpoint path
+    --pipeline_config    pipeline config path
+    -h/--help            show help message
+    "
+    exit 1
+fi
+
+# Argument parsing, modify per network
+for para in $*
+do
+    if [[ $para == --num_train_steps* ]];then
+        num_train_steps=`echo ${para#*=}`
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --bind_core* ]]; then
+        bind_core=`echo ${para#*=}`
+        name_bind="_bindcore"
+    elif [[ $para == --ckpt_path* ]];then
+        ckpt_path=`echo ${para#*=}`
+    elif [[ $para == --pipeline_config* ]];then
+        pipeline_config=`echo ${para#*=}`
+    fi
+done
+
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+
+
+
+########################## Training execution #########################
+start_time=$(date +%s)
+cd $cur_path/../models/research
+if [ -f ${pipeline_config}.bak ];then
+    cp ${pipeline_config}.bak ${pipeline_config}
+else
+    cp ${pipeline_config} ${pipeline_config}.bak
+fi
+
+sed -i "s%/checkpoints%${ckpt_path}%g" ${pipeline_config}
+sed -i "s%/data/coco2017_tfrecords%${data_path}/coco2017_tfrecords%g" ${pipeline_config}
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    export RANK_ID=$RANK_ID
+    export ASCEND_DEVICE_ID=$((ASCEND_DEVICE_ID_START+RANK_ID))
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    if [ -d $cur_path/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf $cur_path/output/${ASCEND_DEVICE_ID}
+        mkdir -p $cur_path/output/${ASCEND_DEVICE_ID}
+    else
+        mkdir -p $cur_path/output/${ASCEND_DEVICE_ID}
+    fi
+
+    # Training launch command, modify per network
+    corenum=`cat /proc/cpuinfo |grep 'processor' |wc -l`
+    let a=RANK_ID*${corenum}/8
+    let b=RANK_ID+1
+    let c=b*${corenum}/8-1
+    if [ "x${bind_core}" != x ];then
+        bind_core="taskset -c $a-$c"
+    fi
+    nohup ${bind_core} python3 -u ./object_detection/model_main_rt.py \
+        --pipeline_config_path=${pipeline_config} \
+        --model_dir=$cur_path/output/${ASCEND_DEVICE_ID_START} \
+        --data_path=${data_path} \
+        --alsologtostderr \
+        --amp \
+        --num_train_steps=${num_train_steps} \
+        "${@:1}" > $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
+done
+wait
+
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+########################## Plog error collection #########################
+grep ERROR $HOME/ascend/log/plog/*.log > $cur_path/output/$ASCEND_DEVICE_ID_START/plog_err.log
+
+################################ Performance result processing #########################
+echo "-----------------------Final result------------------------"
+# FPS calculation, modify per network
+FPS=`grep -a 'INFO:tensorflow:global_step/sec: ' $cur_path/output/$ASCEND_DEVICE_ID_START/train_$ASCEND_DEVICE_ID_START.log|awk 'END {print $2}'`
+
+FPS=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*'${FPS}'*'${RANK_SIZE}'}'`
+echo "Final Performance images/sec : $FPS"
+################################ Accuracy result processing #########################
+# Accuracy calculation, modify per network
+train_accuracy=`grep Precision $cur_path/output/$ASCEND_DEVICE_ID_START/train_$ASCEND_DEVICE_ID_START.log|grep Average |awk 'NR==1 {print $13}'`
+
+echo "Final Training Accuracy mAP: $train_accuracy"
+################################ E2E training duration ##########################
+echo "Final Training Duration sec : $e2e_time"
+
+################################ Performance monitoring #############################
+DeviceType=`uname -m`
+CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'RT2'_'acc'
+ActualFPS=${FPS}
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${batch_size}'*1000/'${FPS}'}'`
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt, modify per model
+grep INFO:tensorflow:loss $cur_path/output/$ASCEND_DEVICE_ID_START/train_$ASCEND_DEVICE_ID_START.log|awk '{print $3}'|sed 's/,//g'|sed '/^$/d' >> $cur_path/output/$ASCEND_DEVICE_ID_START/train_${CaseName}_loss.txt
+
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+echo "TrainingAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID_START/${CaseName}.log
+
diff --git a/TensorFlow/built-in/nlp/FastText_ID0135_for_TensorFlow/test/train_RT2_full_1p.sh b/TensorFlow/built-in/nlp/FastText_ID0135_for_TensorFlow/test/train_RT2_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ce08a6826d3c6362205b8387ddb0b5f289773084
--- /dev/null
+++ b/TensorFlow/built-in/nlp/FastText_ID0135_for_TensorFlow/test/train_RT2_full_1p.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+
+# Current path, no modification needed
+cur_path=`pwd`
+
+# Collective communication parameters, no modification needed
+
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+
+# Dataset path, keep empty, no modification needed
+data_path=""
+
+# Basic parameters, review and modify per model
+# Network name, same as the directory name
+Network="FastText_ID0135_for_TensorFlow"
+# Training epochs
+train_epochs=10
+# Training steps
+train_steps=250000
+# Training batch_size
+batch_size=1024
+# Learning rate
+learning_rate=0.01
+
+# Debug parameters, precision_mode should be reviewed per model
+precision_mode="allow_mix_precision"
+# Maintained parameters, no modification needed below
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+loss_scale=True
+
+# Help message, no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_RT2_full_1p.sh "
+    echo " "
+    echo "parameter explain:
+    --precision_mode     precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump          whether to enable overflow detection, default is False
+    --data_dump_flag     data dump flag, default is False
+    --data_dump_step     data dump step, default is 10
+    --profiling          whether to enable profiling for performance debugging, default is False
+    --data_path          source data of training
+    --max_step           number of training steps
+    --learning_rate      learning rate
+    --batch              batch size
+    --modeldir           model dir
+    --save_interval      save interval for ckpt
+    --loss_scale         enable loss scale, default is True
+    -h/--help            show help message
+    "
+    exit 1
+fi
+
+# Argument parsing, no modification needed
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --max_step* ]];then
+        train_steps=`echo ${para#*=}`
+    elif [[ $para == --learning_rate* ]];then
+        learning_rate=`echo ${para#*=}`
+    elif [[ $para == --batch* ]];then
+        batch_size=`echo ${para#*=}`
+    elif [[ $para == --modeldir* ]];then
+        modeldir=`echo ${para#*=}`
+    elif [[ $para == --save_interval* ]];then
+        save_interval=`echo ${para#*=}`
+    elif [[ $para == --loss_scale* ]];then
+        loss_scale=`echo ${para#*=}`
+    elif [[ $para == --epoch* ]];then
+        train_epochs=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path is provided, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+############# Training execution #########################
+# Training start time, no modification needed
+start_time=$(date +%s)
+cd $cur_path/../
+# Enter the training script directory, review and modify per model
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables, no modification needed
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+    # Create the DeviceID output directory, no modification needed
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Run the training script; the arguments below need no modification, others should be reviewed per model
+    # --data_path, --model_dir, --precision_mode, --over_dump, --over_dump_path, --data_dump_flag, --data_dump_step, --data_dump_path, --profiling, --profiling_dump_path, --autotune
+    nohup python3 main_rt.py \
+        --epoch=${train_epochs} \
+        --data_path=${data_path} \
+        --precision_mode=${precision_mode} \
+        --loss_scale=${loss_scale} \
+        --over_dump=${over_dump} \
+        --over_dump_path=${over_dump_path} \
+        --data_dump_flag=${data_dump_flag} \
+        --data_dump_step=${data_dump_step} \
+        --data_dump_path=${data_dump_path} \
+        --batch_size=${batch_size} \
+        --profiling=${profiling} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+    if [ $? -ne 0 ];then
+        exit 1
+    fi
+done
+wait
+
+# Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results, no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS, review and modify per model
+TrainingTime=`grep "us/step" $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F "/" 'END {print $2}' | awk -F " " '{print $5}' | awk -F "us" '{print $1}'`
+FPS=`awk 'BEGIN {printf "%.2f\n", '1000'*'1000'*'${batch_size}'/'${TrainingTime}'}'`
+# Print, no modification needed
+echo "Final Performance images/sec : $FPS"
+
+# Output training accuracy, review and modify per model
+train_accuracy=`grep "Accuracy:" $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F " " '{print $2}'`
+# Print, no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Performance monitoring result summary
+# Training case info, no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'RT2'_'acc'
+
+## Get performance data, no modification needed
+# Throughput
+ActualFPS=${FPS}
+
+# Extract loss from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt, review per model
+grep "loss:" $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log | awk -F "loss:" '{print $2}' | awk -F " " '{print $1}' > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt
+# Loss of the last iteration, no modification needed
+ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`)
+
+# Print key info into ${CaseName}.log, no modification needed
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow/built-in/recommendation/DeepFM_ID0030_for_TensorFlow/test/train_RT2_full_8p.sh b/TensorFlow/built-in/recommendation/DeepFM_ID0030_for_TensorFlow/test/train_RT2_full_8p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d8300c4c00933df01a62e78cf180b7a07a2e9bfb
--- /dev/null
+++ b/TensorFlow/built-in/recommendation/DeepFM_ID0030_for_TensorFlow/test/train_RT2_full_8p.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+
+# Current path, no modification needed
+cur_path=`pwd`
+
+export FLAG_ENABLE_DUMP=False
+export DUMP_PATH=/var/log/npu/dump
+export DUMP_STEP="0|2"
+export DUMP_MODE="all"
+mkdir -p $DUMP_PATH
+
+# Collective communication parameters, no modification needed
+# Make sure the rank table file rank_table_8p.json is in the configs directory at the same level as test
+export RANK_SIZE=8
+batch_size=16000
+export RANK_TABLE_FILE=${cur_path}/../configs/rank_table_8p.json
+export JOB_ID=10087
+RANK_ID_START=0
+
+sed -i "s/n_epoches = 2/n_epoches = 20/g" `grep -rl "n_epoches = 2" ${cur_path}/../configs/config.py`
+
+# Dataset path, keep empty, no modification needed
+data_path=""
+
+
+# Basic parameters, review and modify per model
+# Network name, same as the directory name
+Network="DeepFM_ID0030_for_TensorFlow"
+
+# Maintained parameters, no modification needed below
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+# Argument parsing, no modification needed
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --autotune* ]];then
+        autotune=`echo ${para#*=}`
+        mv $install_path/fwkacllib/data/rl/Ascend910/custom $install_path/fwkacllib/data/rl/Ascend910/custom_bak
+        mv $install_path/fwkacllib/data/tiling/Ascend910/custom $install_path/fwkacllib/data/tiling/Ascend910/custom_bak
+        autotune_dump_path=${cur_path}/output/autotune_dump
+        mkdir -p ${autotune_dump_path}/GA
+        mkdir -p ${autotune_dump_path}/rl
+        cp -rf $install_path/fwkacllib/data/tiling/Ascend910/custom ${autotune_dump_path}/GA/
+        cp -rf $install_path/fwkacllib/data/rl/Ascend910/custom ${autotune_dump_path}/rl/
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    elif [[ $para == --bind_core* ]]; then
+        bind_core=`echo ${para#*=}`
+        name_bind="_bindcore"
+    fi
+done
+
+# Check that data_path is provided, no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be config"
+    exit 1
+fi
+
+# When autotune is enabled, run single-device training with autotune first, no modification needed
+if [[ $autotune == True ]]; then
+    sh -x train_full_1p.sh --autotune=$autotune --data_path=$data_path
+    wait
+    cp -rf $install_path/fwkacllib/data/tiling/Ascend910/custom ${autotune_dump_path}/GA/
+    cp -rf $install_path/fwkacllib/data/rl/Ascend910/custom ${autotune_dump_path}/rl/
+    wait
+    autotune=False
+    export autotune=False
+
+    export RANK_SIZE=8
+    export RANK_TABLE_FILE=${cur_path}/../configs/rank_table_8p.json
+    export JOB_ID=10087
+    RANK_ID_START=0
+    unset TE_PARALLEL_COMPILER
+fi
+
+# Training start time, no modification needed
+start_time=$(date +%s)
+
+# Enter the training script directory, review and modify per model
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    echo "Device ID: $RANK_ID"
+    export RANK_ID=$RANK_ID
+    export ASCEND_DEVICE_ID=$RANK_ID
+    export DEVICE_ID=$RANK_ID
+    ASCEND_DEVICE_ID=$RANK_ID
+
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    EXEC_DIR=$(pwd)
+
+    corenum=`cat /proc/cpuinfo |grep 'processor' |wc -l`
+    let a=RANK_ID*${corenum}/8
+    let b=RANK_ID+1
+    let c=b*${corenum}/8-1
+    if [ "x${bind_core}" != x ];then
+        bind_core="taskset -c $a-$c"
+    fi
+    nohup ${bind_core} python3.7 ${EXEC_DIR}/../train.py > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+# Training end time, no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Print results, no modification needed
+echo "E2E training Duration sec: $e2e_time"
+
+# Stability and accuracy monitoring result summary
+# Training case info, no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}${name_bind}_bs${BatchSize}_${RANK_SIZE}'p'_'RT2'_'acc'
+
+TrainAccuracy=`grep "eval auc:" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END {print $8}'`
+# Print, no modification needed
+echo "Final Train Accuracy : ${TrainAccuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+## Get performance data
+ActualFPS=`grep "fps" $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log | awk 'END {print $22}'`
+temp1=`echo "1000 * ${batch_size} * ${RANK_SIZE}"|bc`
+TrainingTime=`echo "scale=2;${temp1} / ${ActualFPS}"|bc`
+#ActualFPS=`echo "${RANK_SIZE} * ${FPS}"|bc`
+grep 'loss =' $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk '{print $9}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print key info into ${CaseName}.log, no modification needed
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${TrainAccuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log