From a9ca053f417b1df29d1b0561325d0a17a7be8fab Mon Sep 17 00:00:00 2001
From: tianxi-yi
Date: Mon, 5 Aug 2024 15:59:50 +0800
Subject: [PATCH] Fix roberta demo, roformer demo and widedeep demo Readme

---
 .../nlp/language_model/roberta/ixrt/README.md | 24 ++++++++-----------
 .../language_model/roformer/ixrt/README.md | 20 +++++++---------
 models/recommendation/widedeep/ixrt/README.md | 22 +++++++----------
 3 files changed, 27 insertions(+), 39 deletions(-)

diff --git a/models/nlp/language_model/roberta/ixrt/README.md b/models/nlp/language_model/roberta/ixrt/README.md
index b2db9485..0abdd571 100644
--- a/models/nlp/language_model/roberta/ixrt/README.md
+++ b/models/nlp/language_model/roberta/ixrt/README.md
@@ -50,24 +50,20 @@ bash scripts/infer_roberta_fp16_performance.sh

### Accuracy

-If you want to evaluate the accuracy of this model, please visit the website: < https://github.com/yudefu/ByteMLPerf/tree/iluvatar_general_infer >, which integrates inference and training of many models under this framework, supporting the ILUVATAR backend
+If you want to evaluate the accuracy of this model, please visit <https://github.com/yudefu/ByteMLPerf/tree/iluvatar_general_infer>, which integrates inference and training of many models under this framework and supports the ILUVATAR backend.

-```bash
-
-git clone https://github.com/yudefu/ByteMLPerf.git -b iluvatar_general_infer
-```
-For detailed steps regarding this model, please refer to this document: < https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/backends/ILUVATAR/README.zh_CN.md > Note: You need to modify the relevant paths in the code to your own correct paths.
+For detailed steps regarding this model, please refer to this document: <https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/backends/ILUVATAR/README.zh_CN.md>. Note: You need to modify the relevant paths in the code to your own correct paths.

```bash
-pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
-pip3 install -r ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/requirements.txt
-mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/requirements.txt
+mv perf_engine.py toolbox/ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py

-mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
-mv open_roberta ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
-cd ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad
+mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+mv open_roberta toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+cd toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad
wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_squad.tar
tar -vxf open_squad.tar

@@ -75,11 +71,11 @@ sftp -P 29880 vipzjtd@iftp.iluvatar.com.cn (if the connection fails, replace the hostname with the IP: 10.
get /upload/3-app/byteperf/csarron.tar
exit

-mv csarron.tar ./ByteMLPerf/byte_infer_perf/
+mv csarron.tar toolbox/ByteMLPerf/byte_infer_perf/
tar -zxvf csarron.tar

# Modify ByteMLPerf/byte_infer_perf/general_perf/datasets/open_squad/data_loader.py
# AutoTokenizer.from_pretrained("csarron/roberta-base-squad-v1") => AutoTokenizer.from_pretrained("/ByteMLPerf/byte_infer_perf/csarron/roberta-base-squad-v1")
-cd ./ByteMLPerf/byte_infer_perf/
+cd toolbox/ByteMLPerf/byte_infer_perf/
python3 general_perf/core/perf_engine.py --hardware_type ILUVATAR --task roberta-torch-fp32
```
\ No newline at end of file
diff --git a/models/nlp/language_model/roformer/ixrt/README.md b/models/nlp/language_model/roformer/ixrt/README.md
index 6ef9c784..8324b2a3 100644
--- a/models/nlp/language_model/roformer/ixrt/README.md
+++ b/models/nlp/language_model/roformer/ixrt/README.md
@@ -51,27 +51,23 @@ bash scripts/infer_roformer_fp16_performance.sh

### Accuracy

-If you want to evaluate the accuracy of this model, please visit the website: < https://github.com/yudefu/ByteMLPerf/tree/iluvatar_general_infer >, which integrates inference and training of many models under this framework, supporting the ILUVATAR backend
+If you want to evaluate the accuracy of this model, please visit <https://github.com/yudefu/ByteMLPerf/tree/iluvatar_general_infer>, which integrates inference and training of many models under this framework and supports the ILUVATAR backend.

-```bash
-
-git clone https://github.com/yudefu/ByteMLPerf.git -b iluvatar_general_infer
-```
-For detailed steps regarding this model, please refer to this document: < https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/backends/ILUVATAR/README.zh_CN.md > Note: You need to modify the relevant paths in the code to your own correct paths.
+For detailed steps regarding this model, please refer to this document: <https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/backends/ILUVATAR/README.zh_CN.md>. Note: You need to modify the relevant paths in the code to your own correct paths.

```bash
-pip3 install -r https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/requirements.txt
-mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py
+pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/requirements.txt
+mv perf_engine.py toolbox/ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py

-mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
# Delete Line102 ' build_engine(model_name=model_name, onnx_model_path=onnx_model_path, engine_path=engine_path, MaxBatchSize=MaxBatchSize, BuildFlag='FP16') ' which is the build engine process of conformer in the file ./ByteMLPerf/byte_infer_perf/general_perf/backends/ILUVATAR/compile_backend_iluvatar.py
-mv ./data/open_roformer ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
+mv ./data/open_roformer toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/
# Make sure the roformer-frozen_end.onnx is in the path "./data/open_roformer". Or you should move it to './ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/open_roformer/'.
# mv path/to/roformer-frozen_end.onnx ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/popular/open_roformer/

-wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_cail2019.tar -P ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_cail2019
-cd ./ByteMLPerf/byte_infer_perf/general_perf
+wget https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/open_cail2019.tar -P toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_cail2019
+cd toolbox/ByteMLPerf/byte_infer_perf/general_perf
# Modify model_zoo/roformer-tf-fp32.json
# "inputs": "input_segment:0,input_token:0" --> "inputs": "input_segment0,input_token0"
# "input_shape": {"input_segment:0": [1, 1024], "input_token:0": [1, 1024]} -->"input_shape": {"input_segment0": [1, 1024], "input_token0": [1, 1024]}
diff --git a/models/recommendation/widedeep/ixrt/README.md b/models/recommendation/widedeep/ixrt/README.md
index a8703e29..783aa41e 100644
--- a/models/recommendation/widedeep/ixrt/README.md
+++ b/models/recommendation/widedeep/ixrt/README.md
@@ -49,29 +49,25 @@ bash scripts/infer_widedeep_fp16_performance.sh

### Accuracy

-If you want to evaluate the accuracy of this model, please visit the website: < https://github.com/yudefu/ByteMLPerf/tree/iluvatar_general_infer >, which integrates inference and training of many models under this framework, supporting the ILUVATAR backend
+If you want to evaluate the accuracy of this model, please visit <https://github.com/yudefu/ByteMLPerf/tree/iluvatar_general_infer>, which integrates inference and training of many models under this framework and supports the ILUVATAR backend.

-```bash
-
-git clone https://github.com/yudefu/ByteMLPerf.git -b iluvatar_general_infer
-```
-For detailed steps regarding this model, please refer to this document: < https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/backends/ILUVATAR/README.zh_CN.md > Note: You need to modify the relevant paths in the code to your own correct paths.
+For detailed steps regarding this model, please refer to this document: <https://github.com/yudefu/ByteMLPerf/blob/iluvatar_general_infer/byte_infer_perf/general_perf/backends/ILUVATAR/README.zh_CN.md>. Note: You need to modify the relevant paths in the code to your own correct paths.

```bash -pip3 install -r ./ByteMLPerf/byte_infer_perf/general_perf/requirements.txt -mv perf_engine.py ./ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py +pip3 install -r toolbox/ByteMLPerf/byte_infer_perf/general_perf/requirements.txt +mv perf_engine.py toolbox/ByteMLPerf/byte_infer_perf/general_perf/core/perf_engine.py -mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model -mkdir -p ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/ -wget -O ./ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/eval.csv https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/eval.csv +mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model +mkdir -p toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/ +wget -O toolbox/ByteMLPerf/byte_infer_perf/general_perf/datasets/open_criteo_kaggle/eval.csv https://lf-bytemlperf.17mh.cn/obj/bytemlperf-zoo/eval.csv sftp -P 29889 user01@58.247.142.52 password:5$gS%659 cd yudefu/bytedance_perf ; get widedeep_dynamicshape_new.onnx exit -mv path/to/widedeep_dynamicshape_new.onnx ./ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model/widedeep_dynamicshape.onnx -cd ./ByteMLPerf/byte_infer_perf/general_perf +mv path/to/widedeep_dynamicshape_new.onnx toolbox/ByteMLPerf/byte_infer_perf/general_perf/model_zoo/regular/open_wide_deep_saved_model/widedeep_dynamicshape.onnx +cd toolbox/ByteMLPerf/byte_infer_perf/general_perf python3 core/perf_engine.py --hardware_type ILUVATAR --task widedeep-tf-fp32 ``` \ No newline at end of file -- Gitee