From 5cdaa6fb81ded828c9135d8ef733fd8da462aea2 Mon Sep 17 00:00:00 2001 From: ywx1053982 Date: Tue, 29 Mar 2022 14:21:45 +0800 Subject: [PATCH 1/3] update --- .../Action/training/train.py | 9 ++++++++- .../test/train_full_1p.sh | 10 ++++++++-- .../test/train_performance_1p.sh | 10 ++++++++-- .../MiniGo_ID0629_for_TensorFlow/dual_net.py | 11 ++++++++++- .../test/train_full_1p.sh | 9 +++++++-- .../test/train_full_8p.sh | 9 +++++++-- .../test/train_performance_1p.sh | 16 ++++++++++++++-- .../test/train_performance_8p.sh | 16 ++++++++++++++-- .../MiniGo_ID0629_for_TensorFlow/train.py | 1 + .../examples/00_quick_start/naml_MIND.py | 11 +++++++++-- .../test/train_full_1p.sh | 10 ++++++++-- .../test/train_full_8p.sh | 13 ++++++++++++- .../test/train_performance_1p.sh | 10 ++++++++-- .../test/train_performance_8p.sh | 10 ++++++++-- 14 files changed, 122 insertions(+), 23 deletions(-) diff --git a/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/Action/training/train.py b/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/Action/training/train.py index 0f2c629d1..711614627 100644 --- a/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/Action/training/train.py +++ b/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/Action/training/train.py @@ -51,6 +51,8 @@ parser.add_argument('--train_epoch', dest='train_epoch', type=int, default=2000, parser.add_argument('--modeldir', dest='modeldir', default='./ckpt', help='ckpt dir') parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.0001, help='learning rate') parser.add_argument('--batch_size', dest='batch_size', type=int, default=64, help='# images in batch') + +parser.add_argument("--dynamic_input", type=str, default='1', help="--dynamic_input=1 Use fuzzy compilation. 
--dynamic_input=lazy_recompile Compile using lazy static graph") args = parser.parse_args() @@ -185,6 +187,11 @@ sess_config = tf.ConfigProto() custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add() custom_op.name = "NpuOptimizer" custom_op.parameter_map["dynamic_input"].b = True +if args.dynamic_input == "lazy_recompile": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") +elif args.dynamic_input == "1": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("dynamic_execute") +else: + print("Enter correct compilation parameters.") -custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(args.precision_mode) if args.data_dump_flag.strip() == "True": diff --git a/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_full_1p.sh b/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_full_1p.sh index 715ba8698..a9a5c424c 100644 --- a/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_full_1p.sh +++ b/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_full_1p.sh @@ -24,6 +24,8 @@ train_epoch=2000 batch_size=32 #学习率 learning_rate=0.0001 +#动态输入模式,不需要修改 +dynamic_input="" #维测参数,precision_mode需要模型审视修改 @@ -94,6 +96,8 @@ do batch_size=`echo ${para#*=}` elif [[ $para == --modeldir* ]];then modeldir=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -139,7 +143,8 @@ do --batch=${batch_size} \ --profiling=${profiling} \ --profiling_dump_path=${profiling_dump_path} \ - --autotune=${autotune} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + --autotune=${autotune} \ + --dynamic_input=${dynamic_input} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & if [ $? 
-ne 0 ];then exit 1 fi @@ -205,4 +210,5 @@ echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_performance_1p.sh index cb68c2a48..a1e197b49 100644 --- a/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/built-in/cv/detection/OpenPose_ID0117_for_TensorFlow/test/train_performance_1p.sh @@ -24,6 +24,8 @@ train_epoch=20 batch_size=32 #学习率 learning_rate=0.0001 +#动态输入模式,不需要修改 +dynamic_input="" #维测参数,precision_mode需要模型审视修改 @@ -83,6 +85,8 @@ do batch_size=`echo ${para#*=}` elif [[ $para == --modeldir* ]];then modeldir=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -127,7 +131,8 @@ do --data_dump_path=${data_dump_path} \ --batch=${batch_size} \ --profiling=${profiling} \ - --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + --profiling_dump_path=${profiling_dump_path} \ + --dynamic_input=${dynamic_input} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & if [ $? -ne 0 ];then exit 1 fi @@ -193,4 +198,5 @@ echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}. echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/dual_net.py b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/dual_net.py index 14bb51bad..038dfdec6 100644 --- a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/dual_net.py +++ b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/dual_net.py @@ -190,6 +190,9 @@ flags.DEFINE_string( 'input_layout', 'nhwc', help='Layout of input features: "nhwc" or "nchw"') +flags.DEFINE_string( + 'dynamic_input', '1', + help='--dynamic_input=1 Use fuzzy compilation. --dynamic_input=lazy_recompile Compile using lazy static graph') # TODO(seth): Verify if this is still required. 
flags.register_multi_flags_validator( @@ -209,7 +212,13 @@ class DualNetwork(): custom_op = global_config.graph_options.rewrite_options.custom_optimizers.add() custom_op.name = "NpuOptimizer" custom_op.parameter_map["dynamic_input"].b = True - custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") + print('========= DualNetwork DYNAMIC INPUT = %s =========' % FLAGS.dynamic_input) + if FLAGS.dynamic_input == "lazy_recompile": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") + elif FLAGS.dynamic_input == "1": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("dynamic_execute") + else: + print("Enter correct compilation parameters.") global_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF global_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF diff --git a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_1p.sh b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_1p.sh index 47a9946c9..a6f75350e 100644 --- a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_1p.sh +++ b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_1p.sh @@ -17,6 +17,8 @@ train_epochs= train_steps=80000 #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 npu param precision_mode="allow_fp32_to_fp16" @@ -36,6 +38,8 @@ for para in $* do if [[ $para == --data_path* ]];then data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -61,7 +65,7 @@ wait start=$(date +%s) #(Step3)训练 -python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation --dynamic_input=${dynamic_input}> $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & wait end=$(date +%s) e2etime=$(( $end - $start )) @@ -96,4 +100,5 @@ echo "TrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${Ca echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_8p.sh b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_8p.sh index 56dbf5f33..8cd78e2f2 100644 --- a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_8p.sh +++ 
b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_full_8p.sh @@ -17,6 +17,8 @@ train_epochs= train_steps=80000 #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 npu param precision_mode="allow_fp32_to_fp16" @@ -39,6 +41,8 @@ do elif [[ $para == --bind_core* ]]; then bind_core=`echo ${para#*=}` name_bind="_bindcore" + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -85,7 +89,7 @@ do if [ "x${bind_core}" != x ];then bind_core="taskset -c $a-$c" fi - ${bind_core} python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & + ${bind_core} python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation --dynamic_input=${dynamic_input}> $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & done wait @@ -122,4 +126,5 @@ echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_1p.sh index b7e8dc315..2b9da3177 100644 --- a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_1p.sh @@ -17,6 +17,8 @@ train_epochs= train_steps=500 #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 npu param precision_mode="allow_fp32_to_fp16" @@ -36,6 +38,8 @@ for para in $* do if [[ $para == --data_path* ]];then data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -61,7 +65,14 @@ wait start=$(date +%s) #(Step3)训练 -python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +#python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +python3 train.py \ + --training_data_path=$data_path \ + --steps_to_train=$train_steps \ + --train_batch_size=$batch_size \ + --work_dir=$cur_path/estimator_working_dir \ + 
--export_path=$cur_path/outputs/models/000001-first_generation \ + --dynamic_input=${dynamic_input} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & wait end=$(date +%s) e2etime=$(( $end - $start )) @@ -96,4 +107,5 @@ echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_8p.sh b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_8p.sh index 446dafd10..d52fd0d57 100644 --- a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_8p.sh +++ b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/test/train_performance_8p.sh @@ -17,6 +17,8 @@ train_epochs= train_steps=500 #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 npu param precision_mode="allow_fp32_to_fp16" @@ -39,6 +41,8 @@ do elif [[ $para == --bind_core* ]]; then bind_core=`echo ${para#*=}` name_bind="_bindcore" + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -85,7 +89,14 @@ do if [ "x${bind_core}" != x ];then bind_core="taskset -c $a-$c" fi - ${bind_core} python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & + #${bind_core} python3 train.py --training_data_path=$data_path --steps_to_train=$train_steps --train_batch_size=$batch_size --work_dir=$cur_path/estimator_working_dir --export_path=$cur_path/outputs/models/000001-first_generation > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & + ${bind_core} python3 train.py \ + --training_data_path=$data_path \ + --steps_to_train=$train_steps \ + --train_batch_size=$batch_size \ + --work_dir=$cur_path/estimator_working_dir \ + --export_path=$cur_path/outputs/models/000001-first_generation \ + --dynamic_input=${dynamic_input} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & done wait @@ -122,4 +133,5 @@ echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainAccuracy = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2etime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ 
No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/train.py b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/train.py index c55d271b7..ff0e5a499 100644 --- a/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/train.py +++ b/TensorFlow/built-in/cv/image_classification/MiniGo_ID0629_for_TensorFlow/train.py @@ -140,6 +140,7 @@ flags.declare_key_flag('work_dir') flags.declare_key_flag('train_batch_size') flags.declare_key_flag('num_tpu_cores') flags.declare_key_flag('use_tpu') +flags.declare_key_flag('dynamic_input') FLAGS = flags.FLAGS diff --git a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/recommenders-master/examples/00_quick_start/naml_MIND.py b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/recommenders-master/examples/00_quick_start/naml_MIND.py index f1daed12f..f3bfabed5 100644 --- a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/recommenders-master/examples/00_quick_start/naml_MIND.py +++ b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/recommenders-master/examples/00_quick_start/naml_MIND.py @@ -82,8 +82,13 @@ def main(): custom_op.parameter_map["hcom_parallel"].b = True custom_op.parameter_map["dynamic_input"].b = True - custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") - + if args.dynamic_input == "lazy_recompile": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") + elif args.dynamic_input == "1": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("dynamic_execute") + else: + print("Enter correct compilation parameters.") + sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF sess_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF sess = tf.compat.v1.Session(config=sess_config) @@ -142,6 +147,8 @@ def parse_args(): help="""the max train steps""") parser.add_argument('--MIND_type', default='small', choices=["demo", "small", "large"], help = """the type of MIND data""") + parser.add_argument('--dynamic_input', type=str, default='1', + help="--dynamic_input=1 Use fuzzy compilation. 
--dynamic_input=lazy_recompile Compile using lazy static graph") args, unknown_args = parser.parse_known_args() if len(unknown_args) > 0: diff --git a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_1p.sh b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_1p.sh index e4f7ef13d..0d5815526 100644 --- a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_1p.sh +++ b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_1p.sh @@ -24,6 +24,8 @@ batch_size=32 train_epochs=10 #训练步数 train_steps=100 +#动态输入模式,不需要修改 +dynamic_input="" #维测参数,precision_mode需要模型审视修改 #precision_mode="allow_mix_precision" @@ -75,6 +77,8 @@ do cp -rf $install_path/fwkacllib/data/rl/Ascend910/custom ${autotune_dump_path}/RL/ elif [[ $para == --data_path* ]];then data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -110,7 +114,8 @@ do nohup python3 naml_MIND.py \ --data_path=$data_path \ --epochs=$train_epochs \ - --model_path=${cur_path}/output/${ASCEND_DEVICE_ID} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + --model_path=${cur_path}/output/${ASCEND_DEVICE_ID} \ + --dynamic_input=${dynamic_input} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & done wait @@ -166,4 +171,5 @@ echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "TrainAccuracy = ${TrainAccuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "TrainAccuracy = ${TrainAccuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_8p.sh b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_8p.sh index 801a399a8..c86a3e1ae 100644 --- a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_8p.sh +++ b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_full_8p.sh @@ -10,6 +10,16 @@ export JOB_ID=10018 export RANK_SIZE=8 export RANK_TABLE_FILE=${currentDir}/hccl_${RANK_SIZE}p.json +#动态输入模式,不需要修改 +dynamic_input="" + +for para in $* +do + if [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` + fi +done + for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++)); do export RANK_ID=$RANK_ID @@ -24,6 +34,7 @@ do --model_path=${currentDir}/output/${ASCEND_DEVICE_ID} \ --data_path=${currentDir}/data \ --epochs=1 \ - --max_steps=1000 > ${currentDir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + --max_steps=1000 \ + --dynamic_input=${dynamic_input} > ${currentDir}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & done diff --git a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_1p.sh index 558e920be..af1581781 100644 --- a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_1p.sh +++ 
b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_1p.sh @@ -24,6 +24,8 @@ batch_size=32 train_epochs=1 #训练步数 train_steps=1000 +#动态输入模式,不需要修改 +dynamic_input="" #维测参数,precision_mode需要模型审视修改 #precision_mode="allow_mix_precision" @@ -75,6 +77,8 @@ do cp -rf $install_path/fwkacllib/data/rl/Ascend910/custom ${autotune_dump_path}/RL/ elif [[ $para == --data_path* ]];then data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -111,7 +115,8 @@ do --data_path=$data_path \ --epochs=$train_epochs \ --max_steps=$train_steps \ - --model_path=${cur_path}/output/${ASCEND_DEVICE_ID} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + --model_path=${cur_path}/output/${ASCEND_DEVICE_ID} \ + --dynamic_input=${dynamic_input} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & done wait @@ -155,4 +160,5 @@ echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}. echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_8p.sh b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_8p.sh index 6417e0ec1..76da09279 100644 --- a/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_8p.sh +++ b/TensorFlow/built-in/recommendation/NAML_ID0113_for_TensorFlow/test/train_performance_8p.sh @@ -24,6 +24,8 @@ batch_size=32 train_epochs=1 #训练步数 train_steps=1000 +#动态输入模式,不需要修改 +dynamic_input="" #维测参数,precision_mode需要模型审视修改 #precision_mode="allow_mix_precision" @@ -78,6 +80,8 @@ do elif [[ $para == --bind_core* ]]; then bind_core=`echo ${para#*=}` name_bind="_bindcore" + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -125,7 +129,8 @@ do --data_path=$data_path \ --epochs=$train_epochs \ --max_steps=$train_steps \ - --model_path=${cur_path}/output/${ASCEND_DEVICE_ID} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & + --model_path=${cur_path}/output/${ASCEND_DEVICE_ID} \ + --dynamic_input=${dynamic_input} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 & done wait @@ -170,4 +175,5 @@ echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}. 
echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file -- Gitee From 33cab82a47f7efc3eef39b17f77d20318d857509 Mon Sep 17 00:00:00 2001 From: ywx1053982 Date: Tue, 29 Mar 2022 14:34:57 +0800 Subject: [PATCH 2/3] update AUTOAUGMENT_ID0708_for_TensorFlow --- .../test/train_full_1p.sh | 11 ++++-- .../test/train_full_8p.sh | 11 ++++-- .../test/train_performance_1p.sh | 13 ++++--- .../test/train_performance_8p.sh | 11 ++++-- .../train.py | 13 +++++-- .../.idea/workspace.xml | 35 +++++++++++++++++++ 6 files changed, 78 insertions(+), 16 deletions(-) create mode 100644 TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml diff --git a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_1p.sh b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_1p.sh index fd6a59516..ee02d0a4d 100644 --- a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_1p.sh +++ b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_1p.sh @@ -15,6 +15,8 @@ train_epochs=200 train_steps= #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 data_path="/root/.keras/datasets/cifar-10-batches-py.tar.gz" @@ -26,8 +28,10 @@ fi for para in $* do - if [[ $para == --data_path* ]];then - data_path=`echo ${para#*=}` + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -56,7 +60,7 @@ fi wait start=$(date +%s) -nohup python3 -u train.py --epochs=${train_epochs} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +nohup python3 -u train.py --epochs=${train_epochs} --dynamic_input=${dynamic_input} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & wait end=$(date +%s) @@ -114,3 +118,4 @@ echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_8p.sh b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_8p.sh index 44ed1790a..fa5877916 100644 --- a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_8p.sh +++ b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_full_8p.sh @@ -24,6 +24,8 @@ train_epochs=200 train_steps= #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 
data_path="/root/.keras/datasets/cifar-10-batches-py.tar.gz" @@ -35,8 +37,10 @@ fi for para in $* do - if [[ $para == --data_path* ]];then - data_path=`echo ${para#*=}` + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -70,7 +74,7 @@ do mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID fi echo $ASCEND_DEVICE_ID - nohup python3 train.py --epochs=${train_epochs} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & + nohup python3 train.py --epochs=${train_epochs} --dynamic_input=${dynamic_input} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & done wait @@ -131,3 +135,4 @@ echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_1p.sh b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_1p.sh index 472942f53..24a278920 100644 --- a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_1p.sh +++ b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_1p.sh @@ -15,6 +15,8 @@ train_epochs=3 train_steps= #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 data_path="/root/.keras/datasets/cifar-10-batches-py.tar.gz" @@ -26,8 +28,10 @@ fi for para in $* do - if [[ $para == --data_path* ]];then - data_path=`echo ${para#*=}` + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -56,7 +60,7 @@ fi wait start=$(date +%s) -nohup python3 -u train.py --epochs=${train_epochs} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & +nohup python3 -u train.py --epochs=${train_epochs} --dynamic_input=${dynamic_input} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & wait end=$(date +%s) @@ -113,4 +117,5 @@ echo "ActualFPS = ${ActualFPS}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${Cas echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log -echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file +echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_8p.sh b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_8p.sh index 6aad86020..5852a0d79 100644 --- 
a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_8p.sh +++ b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/test/train_performance_8p.sh @@ -24,6 +24,8 @@ train_epochs=1 train_steps= #学习率 learning_rate= +#动态输入模式,不需要修改 +dynamic_input="" #参数配置 data_path="/root/.keras/datasets/cifar-10-batches-py.tar.gz" @@ -35,8 +37,10 @@ fi for para in $* do - if [[ $para == --data_path* ]];then - data_path=`echo ${para#*=}` + if [[ $para == --data_path* ]];then + data_path=`echo ${para#*=}` + elif [[ $para == --dynamic_input* ]];then + dynamic_input=`echo ${para#*=}` fi done @@ -70,7 +74,7 @@ do mkdir -p $cur_path/test/output/$ASCEND_DEVICE_ID fi echo $ASCEND_DEVICE_ID - nohup python3 train.py > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & + nohup python3 train.py --dynamic_input=${dynamic_input} > $cur_path/test/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 & done wait @@ -131,3 +135,4 @@ echo "TrainingTime = ${TrainingTime}" >> $cur_path/test/output/$ASCEND_DEVICE_ID echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "ActualLoss = ${ActualLoss}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log +echo "DynamicInput = ${dynamic_input}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log \ No newline at end of file diff --git a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/train.py b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/train.py index 7cf972356..80100785e 100644 --- a/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/train.py +++ b/TensorFlow/built-in/cv/image_classification/AUTOAUGMENT_ID0708_for_TensorFlow/train.py @@ -62,14 +62,16 @@ def parse_args(): parser.add_argument('--batch-size', default=128, type=int) parser.add_argument('--cutout', default=False, type=str2bool) parser.add_argument('--auto-augment', default=False, type=str2bool) - + parser.add_argument("--dynamic_input", type=str, default='1', + help="--dynamic_input=1 Use fuzzy compilation. 
--dynamic_input=lazy_recompile Compile using lazy static graph") args = parser.parse_args() return args +args = parse_args() def main(): - args = parse_args() + # args = parse_args() if args.name is None: args.name = 'WideResNet%s-%s' %(args.depth, args.width) @@ -132,7 +134,12 @@ if __name__ == '__main__': custom_op.name = "NpuOptimizer" custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") custom_op.parameter_map["dynamic_input"].b = 1 - custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") + if args.dynamic_input == "lazy_recompile": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile") + elif args.dynamic_input == "1": + custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("dynamic_execute") + else: + print("Enter correct compilation parameters.") npu_keras_sess = set_keras_session_npu_config(config=global_config) # ***** npu modify end ***** main() diff --git a/TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml b/TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml new file mode 100644 index 000000000..1c4af8c8e --- /dev/null +++ b/TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + + + 1648516902252 + + + + \ No newline at end of file -- Gitee From 0106ef41e62293277da0c5426e362535fdb7c3fc Mon Sep 17 00:00:00 2001 From: ywx1053982 Date: Tue, 29 Mar 2022 14:39:54 +0800 Subject: [PATCH 3/3] update AUTOAUGMENT_ID0708_for_TensorFlow --- .../.idea/workspace.xml | 35 ------------------- 1 file changed, 35 deletions(-) delete mode 100644 TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml diff --git a/TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml b/TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml deleted file mode 100644 index 1c4af8c8e..000000000 --- a/TensorFlow/built-in/nlp/Siamese_ID0506_for_TensorFlow/.idea/workspace.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - 1648516902252 - - - - \ No newline at end of file -- Gitee
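For reference, all three patches apply the same pattern: a --dynamic_input option is threaded from the test scripts into the Python entry point and mapped onto the NPU session options, where 1 selects fuzzy compilation (dynamic_execute) and lazy_recompile selects lazy static-graph recompilation. The sketch below condenses that pattern into one place. It is an illustration only: it assumes TensorFlow 1.x with the Ascend NPU plugin registered as "NpuOptimizer" (as in the patched models), and the helper name build_npu_session_config does not appear in any of the patched files.

```python
# Minimal sketch of the --dynamic_input handling added by these patches.
# Assumption: TensorFlow 1.x; the Ascend NPU plugin supplies the "NpuOptimizer" custom optimizer.
import argparse

import tensorflow as tf
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig


def build_npu_session_config(dynamic_input):
    """Return a ConfigProto whose NPU options reflect the --dynamic_input value."""
    sess_config = tf.ConfigProto()
    custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    # Enable dynamic-shape input handling on the NPU.
    custom_op.parameter_map["dynamic_input"].b = True
    if dynamic_input == "lazy_recompile":
        # Lazy static-graph mode: recompile only when an unseen input shape arrives.
        custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
    elif dynamic_input == "1":
        # Fuzzy compilation: execute the graph with fully dynamic shapes.
        custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("dynamic_execute")
    else:
        # The patched scripts only print a warning here and fall through;
        # raising makes the misconfiguration explicit in this sketch.
        raise ValueError("--dynamic_input must be '1' or 'lazy_recompile', got %r" % dynamic_input)
    # The patched models also disable these graph rewrites for NPU execution.
    sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
    sess_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
    return sess_config


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dynamic_input", type=str, default="1",
                        help="1: fuzzy compilation; lazy_recompile: lazy static-graph compilation")
    args, _ = parser.parse_known_args()
    config = build_npu_session_config(args.dynamic_input)
    # In the patched models this config is passed to tf.Session(config=config)
    # (or to set_keras_session_npu_config for the Keras-based AutoAugment model).
    print(config.graph_options.rewrite_options.custom_optimizers)
```

A test script would then be invoked as, for example, `bash test/train_performance_1p.sh --data_path=/path/to/data --dynamic_input=lazy_recompile`; note that the shell scripts initialize dynamic_input to an empty string, so the flag should be passed explicitly rather than relying on the Python-side default of '1'.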