diff --git a/models/nlp/language_model/roberta/ixrt/README.md b/models/nlp/language_model/roberta/ixrt/README.md
index 5ba6e8880b088b23bc9d8b4205291309951f0624..56fd4be779c97a08e972f1b753c738efcc9b7b4b 100644
--- a/models/nlp/language_model/roberta/ixrt/README.md
+++ b/models/nlp/language_model/roberta/ixrt/README.md
@@ -58,19 +58,22 @@ bash scripts/infer_roberta_fp16_performance.sh
 
 ### Accuracy
 
-If you want to evaluate the accuracy of this model, please visit the website: , which integrates inference and training of many models under this framework, supporting the ILUVATAR backend
+If you want to evaluate the accuracy of this model, please see here: , which integrates the inference and training of many models under this framework and supports the ILUVATAR backend.
 
-For detailed steps regarding this model, please refer to this document: Note: You need to modify the relevant paths in the code to your own correct paths.
+
+For detailed steps regarding this model, please refer to this document: Note: You need to modify the relevant paths in the code to the correct paths on your own machine.
 
 ```bash
-# Install requirements
-pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
-pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/requirements.txt
-mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
-# Move open_roberta
-mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
-mv open_roberta ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/requirements.txt
+mv perf_engine.py toolbox/ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
+
+mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+mv open_roberta toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+cd toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad
+wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_squad.tar
+tar -vxf open_squad.tar
+cd -  # return to the previous working directory
 
 # Get open_squad
 wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_squad.tar
@@ -78,14 +81,12 @@ tar xf open_squad.tar
 cp ./open_squad/* ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad
 rm -f open_squad.tar
 
-# Get csarron.tar
-wget http://files.deepspark.org.cn:880/deepspark/csarron.tar
-tar xf csarron.tar
-rm -f csarron.tar
-mv csarron/ ./ByteMLPerf/byte_infer_perf/
+mv csarron.tar toolbox/ByteMLPerf/byte_infer_perf/
+tar -xvf toolbox/ByteMLPerf/byte_infer_perf/csarron.tar -C toolbox/ByteMLPerf/byte_infer_perf/
+# Modify ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad/data_loader.py:
+# AutoTokenizer.from_pretrained("csarron/roberta-base-squad-v1") => AutoTokenizer.from_pretrained("/ByteMLPerf/byte_infer_perf/csarron/roberta-base-squad-v1")
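+# The edit above can also be scripted. This is only a sketch: it assumes the original
+# string appears verbatim in data_loader.py, so adjust the file path to wherever your
+# ByteMLPerf checkout actually lives before running it.
+sed -i 's#csarron/roberta-base-squad-v1#/ByteMLPerf/byte_infer_perf/csarron/roberta-base-squad-v1#' \
+    ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad/data_loader.py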
 
-# Run Acc scripts
-cd ./ByteMLPerf/byte_infer_perf/
+cd toolbox/ByteMLPerf/byte_infer_perf/
 python3 general_perf/core/perf_engine.py --hardware_type ILUVATAR --task roberta-torch-fp32
 ```
 
diff --git a/models/nlp/language_model/roformer/ixrt/README.md b/models/nlp/language_model/roformer/ixrt/README.md
index ba1e5975e0d0f6d094f3055aaf708c65dfed9b07..8eb1f233b395008d0c7c35cc35071c391d1b5eaa 100644
--- a/models/nlp/language_model/roformer/ixrt/README.md
+++ b/models/nlp/language_model/roformer/ixrt/README.md
@@ -64,31 +64,23 @@ bash scripts/infer_roformer_fp16_performance.sh
 
 ### Accuracy
 
-If you want to evaluate the accuracy of this model, please visit the website: , which integrates inference and training of many models under this framework, supporting the ILUVATAR backend.
+If you want to evaluate the accuracy of this model, please see here: , which integrates the inference and training of many models under this framework and supports the ILUVATAR backend.
 
-For detailed steps regarding this model, please refer to this document: Note: You need to modify the relevant paths in the code to your own correct paths.
+
+For detailed steps regarding this model, please refer to this document: Note: You need to modify the relevant paths in the code to the correct paths on your own machine.
 
 ```bash
-# Clone ByteMLPerf
-git clone -b iluvatar_general_infer https://github.com/yudefu/ByteMLPerf.git
-pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
-mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
-mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
-
-# Comment Line102 in compile_backend_iluvatar.py
-sed -i '102s/build_engine/# build_engine/' ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/compile_backend_iluvatar.py
-
-# Move open_roformer
-mv ./data/open_roformer ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
-
-# Setup open_cail2019 dataset
-wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_cail2019.tar
-tar xf open_cail2019.tar
-cp ./open_cail2019/* ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_cail2019
-rm -f open_cail2019.tar
-
-# Go to general_perf/
-cd ./ByteMLPerf/byte_infer_perf/general_perf
+
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
+mv perf_engine.py toolbox/ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
+
+mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+# Delete line 102, `build_engine(model_name=model_name, onnx_model_path=onnx_model_path, engine_path=engine_path, MaxBatchSize=MaxBatchSize, BuildFlag='FP16')`,
+# which is the conformer build-engine step, from the file ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/compile_backend_iluvatar.py
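+# The deletion above can also be scripted. A minimal sketch, assuming the build_engine(...)
+# call is still on line 102 of your checkout (verify the line number first; it may have shifted):
+sed -i '102d' ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/compile_backend_iluvatar.py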
+mv ./data/open_roformer toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+# Make sure roformer-frozen_end.onnx is under "./data/open_roformer"; otherwise move it into './ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/open_roformer/', e.g.:
+# mv path/to/roformer-frozen_end.onnx ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/open_roformer/
+wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_cail2019.tar -P toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_cail2019
+tar -xf toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_cail2019/open_cail2019.tar -C toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_cail2019 --strip-components=1
+cd toolbox/ByteMLPerf/byte_infer_perf/general_perf
 # Modify model_zoo/roformer-tf-fp32.json
 sed -i 's/segment:0/segment0/g; s/token:0/token0/g' model_zoo/roformer-tf-fp32.json
 # Run Acc scripts
diff --git a/models/recommendation/widedeep/ixrt/README.md b/models/recommendation/widedeep/ixrt/README.md
index fb01a4d49378ff076dd712ca225bb352262d1f80..3707f55d1acc0e674c87d10c01daf2ef4ee9fad3 100644
--- a/models/recommendation/widedeep/ixrt/README.md
+++ b/models/recommendation/widedeep/ixrt/README.md
@@ -50,19 +50,19 @@ bash scripts/infer_widedeep_fp16_performance.sh
 
 ### Accuracy
 
-If you want to evaluate the accuracy of this model, please visit the website: , which integrates inference and training of many models under this framework, supporting the ILUVATAR backend
+If you want to evaluate the accuracy of this model, please see here: , which integrates the inference and training of many models under this framework and supports the ILUVATAR backend.
 
-For detailed steps regarding this model, please refer to this document: Note: You need to modify the relevant paths in the code to your own correct paths.
+
+For detailed steps regarding this model, please refer to this document: Note: You need to modify the relevant paths in the code to the correct paths on your own machine.
 
 ```bash
-# Clone ByteMLPerf
-git clone -b iluvatar_general_infer https://github.com/yudefu/ByteMLPerf.git
-pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
-mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
-# Get eval.csv and onnx
-mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model
-mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
+mv perf_engine.py toolbox/ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
+
+mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model
+mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/
+wget -O toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/eval.csv https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/eval.csv
 
 wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/eval.csv
 mv eval.csv ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/
@@ -70,8 +70,8 @@ mv eval.csv ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggl
 
 wget http://files.deepspark.org.cn:880/deepspark/widedeep_dynamicshape_new.onnx
 mv widedeep_dynamicshape_new.onnx ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model/
 
-# Run Acc scripts
-cd ./ByteMLPerf/byte_infer_perf/general_perf
+mv path/to/widedeep_dynamicshape_new.onnx toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model/widedeep_dynamicshape.onnx
+cd toolbox/ByteMLPerf/byte_infer_perf/general_perf
 python3 core/perf_engine.py --hardware_type ILUVATAR --task widedeep-tf-fp32
 ```