diff --git a/TensorFlow/built-in/recommendation/DeepCTR_Series_for_TensorFlow/examples/run_fwfm_profiling.py b/TensorFlow/built-in/recommendation/DeepCTR_Series_for_TensorFlow/examples/run_fwfm_profiling.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed6eb14282232811a09e914e81a5302ea4d17a59
--- /dev/null
+++ b/TensorFlow/built-in/recommendation/DeepCTR_Series_for_TensorFlow/examples/run_fwfm_profiling.py
@@ -0,0 +1,124 @@
+#
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from npu_bridge.npu_init import *
+import tensorflow as tf  # explicit import for ConfigProto/compat rather than relying on the wildcard above
+from tensorflow import keras
+import pandas as pd
+from sklearn.metrics import log_loss, roc_auc_score
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import LabelEncoder, MinMaxScaler
+
+from deepctr.models import FwFM
+from deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names
+
+import argparse
+import os
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--data_dir', default="./",
+                        help='data path for training')
+    parser.add_argument('--precision_mode', default='allow_fp32_to_fp16',
+                        help='allow_fp32_to_fp16/force_fp16/'
+                             'must_keep_origin_dtype/allow_mix_precision.')
+    parser.add_argument('--profiling', default=False,
+                        help='whether to enable profiling for performance debugging, default is False')
+    parser.add_argument('--profiling_dump_path', default="/home/data",
+                        help='the path to save profiling data')
+    args = parser.parse_args()
+
+    # Build the NPU session config: disable graph rewrites that conflict with
+    # the NpuOptimizer and select the requested precision mode.
+    sess_config = tf.ConfigProto()
+    custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
+    sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
+    sess_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+    custom_op.name = "NpuOptimizer"
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(args.precision_mode)
+    # Profiling is forced on in this variant; the flag-driven version is kept
+    # below for reference.
+    custom_op.parameter_map["profiling_mode"].b = True
+    custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes('{"output":"./profiling","training_trace":"on","task_trace":"on","fp_point":"","bp_point":"","aic_metrics":"PipeUtilization"}')
+    #if args.profiling:
+    #    custom_op.parameter_map["profiling_mode"].b = True
+    #    custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(
+    #        '{"output":"' + args.profiling_dump_path + '", \
+    #        "training_trace":"on", \
+    #        "task_trace":"on", \
+    #        "aicpu":"on", \
+    #        "aic_metrics":"PipeUtilization", \
+    #        "fp_point":"concatenate_1/concat", \
+    #        "bp_point":"training/Adam/gradients/gradients/AddN_83"}')
+
+    npu_keras_sess = set_keras_session_npu_config(config=sess_config)
+    data = pd.read_csv(os.path.join(args.data_dir, 'criteo_sample.txt'))
+
+    sparse_features = ['C' + str(i) for i in range(1, 27)]
+    dense_features = ['I' + str(i) for i in range(1, 14)]
+
+    data[sparse_features] = data[sparse_features].fillna('-1')
+    data[dense_features] = data[dense_features].fillna(0)
+    target = ['label']
+
+    # 1. Label-encode the sparse features and apply a simple min-max
+    #    transformation to the dense features.
+    for feat in sparse_features:
+        lbe = LabelEncoder()
+        data[feat] = lbe.fit_transform(data[feat])
+    mms = MinMaxScaler(feature_range=(0, 1))
+    data[dense_features] = mms.fit_transform(data[dense_features])
+
+    # 2. Count the unique values of each sparse field and record the dense
+    #    feature field names.
+    fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=data[feat].nunique(), embedding_dim=4)
+                              for feat in sparse_features] + [DenseFeat(feat, 1)
+                              for feat in dense_features]
+
+    dnn_feature_columns = fixlen_feature_columns
+    linear_feature_columns = fixlen_feature_columns
+
+    feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
+
+    # 3. Generate the input data for the model.
+    train, test = train_test_split(data, test_size=0.2)
+    train_model_input = {name: train[name] for name in feature_names}
+    test_model_input = {name: test[name] for name in feature_names}
+
+    # 4. Define the model, then train, predict and evaluate.
+    model = FwFM(linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(100, 100), task='binary')
+    model.compile("adam", "binary_crossentropy",
+                  metrics=['binary_crossentropy'])
+
+    history = model.fit(train_model_input, train[target].values,
+                        batch_size=128, epochs=10, verbose=1, validation_split=0.2)
+    pred_ans = model.predict(test_model_input, batch_size=8)
+    print("test LogLoss", round(log_loss(test[target].values, pred_ans), 4))
+    print("test AUC", round(roc_auc_score(test[target].values, pred_ans), 4))
+    close_session(npu_keras_sess)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
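The commented-out block above keeps a flag-driven profiling configuration for reference. Below is a minimal runnable sketch of the same idea; the helper name apply_profiling is hypothetical, while the parameter_map keys and trace options are taken from the script itself, and json.dumps replaces the fragile single-quoted string concatenation:

import json

import tensorflow as tf


def apply_profiling(custom_op, args):
    """Hypothetical helper: enable NPU profiling only when --profiling is requested."""
    # argparse delivers command-line values as strings, so accept "True"
    # alongside the boolean default of False.
    if args.profiling in (True, "True", "true"):
        options = json.dumps({
            "output": args.profiling_dump_path,
            "training_trace": "on",
            "task_trace": "on",
            "aicpu": "on",
            "aic_metrics": "PipeUtilization",
            "fp_point": "concatenate_1/concat",
            "bp_point": "training/Adam/gradients/gradients/AddN_83",
        })
        custom_op.parameter_map["profiling_mode"].b = True
        custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(options)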
diff --git a/TensorFlow/built-in/recommendation/DeepCTR_Series_for_TensorFlow/test/train_ID3057_FwFM_performance_1p_RT2_profiling2.sh b/TensorFlow/built-in/recommendation/DeepCTR_Series_for_TensorFlow/test/train_ID3057_FwFM_performance_1p_RT2_profiling2.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4d105d63746527b58ff261c1af4aefb0736b6538
--- /dev/null
+++ b/TensorFlow/built-in/recommendation/DeepCTR_Series_for_TensorFlow/test/train_ID3057_FwFM_performance_1p_RT2_profiling2.sh
@@ -0,0 +1,177 @@
+#!/bin/bash
+
+# Current path; no modification needed
+cur_path=`pwd`
+
+# Collective communication parameters; no modification needed
+export RANK_SIZE=1
+export JOB_ID=10087
+RANK_ID_START=0
+RankSize=1
+# Dataset path; keep empty, no modification needed
+data_path=""
+#export ASCEND_SLOG_PRINT_TO_STDOUT=1
+
+# Enable runtime v2 (RT2.0)
+export ENABLE_RUNTIME_V2=1
+
+# Basic parameters; review and modify per model
+# Network name, identical to the directory name
+Network="FwFM_ID3057_for_TensorFlow"
+# Training epochs
+train_epochs=5
+# Training batch size
+batch_size=128
+# Training steps
+train_steps=
+# Learning rate
+learning_rate=
+
+# Debugging parameters; precision_mode should be reviewed per model
+precision_mode="allow_fp32_to_fp16"
+# The following parameters do not need modification
+over_dump=False
+data_dump_flag=False
+data_dump_step="10"
+profiling=False
+
+# Help message; no modification needed
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_performance_1P.sh"
+    echo " "
+    echo "parameter explain:
+    --precision_mode         precision mode(allow_fp32_to_fp16/force_fp16/must_keep_origin_dtype/allow_mix_precision)
+    --over_dump              whether to enable overflow detection, default is False
+    --data_dump_flag         data dump flag, default is False
+    --data_dump_step         data dump step, default is 10
+    --profiling              whether to enable profiling for performance debugging, default is False
+    --data_path              source data of training
+    -h/--help                show help message
+    "
+    exit 1
+fi
+
+# Parameter validation; no modification needed
+for para in $*
+do
+    if [[ $para == --precision_mode* ]];then
+        precision_mode=`echo ${para#*=}`
+    elif [[ $para == --over_dump* ]];then
+        over_dump=`echo ${para#*=}`
+        over_dump_path=${cur_path}/output/overflow_dump
+        mkdir -p ${over_dump_path}
+    elif [[ $para == --data_dump_flag* ]];then
+        data_dump_flag=`echo ${para#*=}`
+        data_dump_path=${cur_path}/output/data_dump
+        mkdir -p ${data_dump_path}
+    elif [[ $para == --data_dump_step* ]];then
+        data_dump_step=`echo ${para#*=}`
+    elif [[ $para == --profiling* ]];then
+        profiling=`echo ${para#*=}`
+        profiling_dump_path=${cur_path}/output/profiling
+        mkdir -p ${profiling_dump_path}
+    elif [[ $para == --data_path* ]];then
+        data_path=`echo ${para#*=}`
+    fi
+done
+
+# Check that data_path was passed in; no modification needed
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be specified"
+    exit 1
+fi
+
+# Training start time; no modification needed
+start_time=$(date +%s)
+
+# Enter the training script directory; review and modify per model
+cd $cur_path/../examples
+
+# Temporarily shorten the run from 10 to 5 epochs for this performance case
+sed -i "s|epochs=10|epochs=5|g" run_fwfm_profiling.py
+
+for((RANK_ID=$RANK_ID_START;RANK_ID<$((RANK_SIZE+RANK_ID_START));RANK_ID++));
+do
+    # Set environment variables; no modification needed
+    echo "Device ID: $ASCEND_DEVICE_ID"
+    export RANK_ID=$RANK_ID
+
+    # Create the DeviceID output directory; no modification needed
+    if [ -d ${cur_path}/output/${ASCEND_DEVICE_ID} ];then
+        rm -rf ${cur_path}/output/${ASCEND_DEVICE_ID}
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    else
+        mkdir -p ${cur_path}/output/$ASCEND_DEVICE_ID/ckpt
+    fi
+
+    # Run the training script; the arguments below need no modification, the rest should be reviewed per model
+    #--data_dir, --model_dir, --precision_mode, --over_dump, --over_dump_path,--data_dump_flag,--data_dump_step,--data_dump_path,--profiling,--profiling_dump_path
+    nohup python3 run_fwfm_profiling.py \
+        --data_dir=${data_path} \
+        --precision_mode=${precision_mode} \
+        --profiling=${profiling} \
+        --profiling_dump_path=${profiling_dump_path} > ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log 2>&1 &
+done
+wait
+
+# Training end time; no modification needed
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+# Locate the directory containing the msprof profiling tool, then parse and export the collected data
+a=`find /usr/local/Ascend/ -name msprof | head -1 | awk -F 'msprof' '{print $1}'`
+cd $a
+./msprof --parse=on --output=$cur_path/../examples/profiling
+./msprof --export=on --output=$cur_path/../examples/profiling
+
+# Restore the original epoch count in the example script (we are no longer in the examples directory)
+sed -i "s|epochs=5|epochs=10|g" $cur_path/../examples/run_fwfm_profiling.py
+
+# Print results; no modification needed
+echo "------------------ Final result ------------------"
+# Output performance FPS; review and modify per model
+Time=`cat $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|tr -d '\b\r'|grep -Eo "[0-9]*us/sample"|awk -F "us/sample" 'END {print $1}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", 1 /'${Time}'*1000000}'`
+# Print; no modification needed
+echo "Final Performance item/sec : $FPS"
+
+# Output CompileTime
+CompileTime=`grep '/sample' $cur_path/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log| head -n 2| awk '{print $4}' | awk -F 's' '{sum+=$1} END {print sum}'`
+
+# Output training accuracy; review and modify per model
+train_accuracy=`grep "test AUC" ${cur_path}/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log|awk '{print $3}'`
+# Print; no modification needed
+echo "Final Train Accuracy : ${train_accuracy}"
+echo "E2E Training Duration sec : $e2e_time"
+
+# Performance monitoring result summary
+# Training case information; no modification needed
+BatchSize=${batch_size}
+DeviceType=`uname -m`
+CaseName=${Network}_bs${BatchSize}_${RANK_SIZE}'p'_'RT2'_'profiling2'_'perf'
+
+## Collect performance data; no modification needed
+# Throughput
+ActualFPS=${FPS}
+# Training time per iteration
+TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'`
+
+# Extract the loss values from train_$ASCEND_DEVICE_ID.log into train_${CaseName}_loss.txt; review per model
+cat $cur_path/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log|tr -d '\b\r'|grep -Eo " loss: [0-9]*\.[0-9]*"|awk -F " " '{print $2}' > $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt
+
+# Loss of the last iteration; no modification needed
+ActualLoss=`awk 'END {print}' $cur_path/output/$ASCEND_DEVICE_ID/train_${CaseName}_loss.txt`
+
+# Print the key information into ${CaseName}.log; no modification needed
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${BatchSize}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = ${DeviceType}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${ActualFPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${TrainingTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CompileTime = ${CompileTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
\ No newline at end of file
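As a sanity check on the awk one-liners above: the script greps the Keras "us/sample" timing from the training log, inverts it into samples per second, and derives the per-iteration time from the batch size. The same arithmetic in plain Python, with an invented reading of 500 us/sample (the real value comes from the log):

batch_size = 128                       # matches batch_size in the script
time_us_per_sample = 500.0             # hypothetical value grepped from the log

# FPS=`awk 'BEGIN{printf "%.2f\n", 1 /'${Time}'*1000000}'`
fps = 1.0 / time_us_per_sample * 1e6   # -> 2000.00 samples/sec

# TrainingTime=`awk 'BEGIN{printf "%.2f\n",'${BatchSize}'/'${FPS}'}'`
training_time = batch_size / fps       # -> 0.06 seconds per iteration

print("ActualFPS = %.2f" % fps)
print("TrainingTime = %.2f" % training_time)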