diff --git a/models/cv/classification/alexnet/ixrt/README.md b/models/cv/classification/alexnet/ixrt/README.md index 4a25a911ba8e49f530ce4602eeed5845e02debe3..65fa53efbc1115f3c9bb26c820afa1337d78a561 100644 --- a/models/cv/classification/alexnet/ixrt/README.md +++ b/models/cv/classification/alexnet/ixrt/README.md @@ -15,11 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate -pip3 install pycuda +pip3 install -r requirements.txt ``` ### Download diff --git a/models/cv/classification/alexnet/ixrt/requirements.txt b/models/cv/classification/alexnet/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc645b22bbcf3675e3bfe6f8d2da91ca8c089b3a --- /dev/null +++ b/models/cv/classification/alexnet/ixrt/requirements.txt @@ -0,0 +1,5 @@ +tqdm +onnx +onnxsim +tabulate +pycuda \ No newline at end of file diff --git a/models/cv/classification/densenet121/ixrt/README.md b/models/cv/classification/densenet121/ixrt/README.md index 9b5c20781e62eabf873f71a0dc63f61a4b015a29..f506c085b2dadc6c4a62164c258bc571d51ac27e 100644 --- a/models/cv/classification/densenet121/ixrt/README.md +++ b/models/cv/classification/densenet121/ixrt/README.md @@ -15,12 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate -pip3 install ppq -pip3 install pycuda +pip3 install -r requirements.txt ``` ### Download @@ -40,7 +35,7 @@ python3 export_onnx.py --output_model checkpoints/densenet121.onnx export DATASETS_DIR=/path/to/imagenet_val/ export CHECKPOINTS_DIR=./checkpoints export RUN_DIR=./ -export CONFIG_DIR=config/DENSENET_CONFIG +export CONFIG_DIR=config/DENSENET121_CONFIG ``` ### FP16 diff --git a/models/cv/classification/densenet121/ixrt/config/DENSENET_CONFIG b/models/cv/classification/densenet121/ixrt/config/DENSENET121_CONFIG similarity index 100% rename from models/cv/classification/densenet121/ixrt/config/DENSENET_CONFIG rename to models/cv/classification/densenet121/ixrt/config/DENSENET121_CONFIG diff --git a/models/cv/classification/densenet121/ixrt/inference.py b/models/cv/classification/densenet121/ixrt/inference.py index 2c9dcb3f9cc5b9a26903651a31fafa16d8f0db31..50aafd4fd5ef9664203cdcbdfbdb577edca933c4 100644 --- a/models/cv/classification/densenet121/ixrt/inference.py +++ b/models/cv/classification/densenet121/ixrt/inference.py @@ -83,6 +83,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -104,7 +105,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/densenet121/ixrt/requirements.txt b/models/cv/classification/densenet121/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..01ec4f116bdac5ae39f168903eed8268f2391a08 --- /dev/null +++ b/models/cv/classification/densenet121/ixrt/requirements.txt @@ -0,0 +1,6 @@ +tqdm 
+onnx +onnxsim +tabulate +ppq +pycuda \ No newline at end of file diff --git a/models/cv/classification/densenet121/ixrt/scripts/infer_densenet_fp16_accuracy.sh b/models/cv/classification/densenet121/ixrt/scripts/infer_densenet121_fp16_accuracy.sh similarity index 100% rename from models/cv/classification/densenet121/ixrt/scripts/infer_densenet_fp16_accuracy.sh rename to models/cv/classification/densenet121/ixrt/scripts/infer_densenet121_fp16_accuracy.sh diff --git a/models/cv/classification/densenet121/ixrt/scripts/infer_densenet_fp16_performance.sh b/models/cv/classification/densenet121/ixrt/scripts/infer_densenet121_fp16_performance.sh similarity index 100% rename from models/cv/classification/densenet121/ixrt/scripts/infer_densenet_fp16_performance.sh rename to models/cv/classification/densenet121/ixrt/scripts/infer_densenet121_fp16_performance.sh diff --git a/models/cv/classification/efficientnet_b0/ixrt/README.md b/models/cv/classification/efficientnet_b0/ixrt/README.md index bf57f509112404bf3a2ba2ead6a1dd828d539a3b..e7f3087bc24565d9459a8d90caf5e19585c07f11 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/README.md +++ b/models/cv/classification/efficientnet_b0/ixrt/README.md @@ -15,12 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate -pip3 install ppq==0.6.6 -pip install protobuf==3.20.3 pycuda +pip3 install -r requirements.txt ``` ### Download @@ -32,7 +27,7 @@ Dataset: to download the validation dat ### Model Conversion ```bash -python3 python/export_onnx.py --origin_model /path/to/efficientnet_b0_rwightman-3dd342df.pth --output_model efficientnet-b0.onnx +python3 python/export_onnx.py --origin_model /path/to/efficientnet_b0_rwightman-3dd342df.pth --output_model efficientnet_b0.onnx ``` ## Inference diff --git a/models/cv/classification/efficientnet_b0/ixrt/config/EFFICIENTNET_B0_CONFIG b/models/cv/classification/efficientnet_b0/ixrt/config/EFFICIENTNET_B0_CONFIG index f9a0976e246bfb155e473a410d9e922155be9665..f566df019d19bb2eb5b57af63837e42e8cdd7504 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/config/EFFICIENTNET_B0_CONFIG +++ b/models/cv/classification/efficientnet_b0/ixrt/config/EFFICIENTNET_B0_CONFIG @@ -18,7 +18,7 @@ # ORIGINE_MODEL : 原始onnx文件名称 IMGSIZE=224 MODEL_NAME=EfficientNet_b0 -ORIGINE_MODEL=efficientnet-b0.onnx +ORIGINE_MODEL=efficientnet_b0.onnx # QUANT CONFIG (仅PRECISION为int8时生效) # QUANT_OBSERVER : 量化策略,可选 [hist_percentile, percentile, minmax, entropy, ema] diff --git a/models/cv/classification/efficientnet_b0/ixrt/python/inference.py b/models/cv/classification/efficientnet_b0/ixrt/python/inference.py index 414546b2aa0f945b47ac968a86ac72c6899d824f..f1155702bcdccadc65da85b589aeb25ec269151c 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/python/inference.py +++ b/models/cv/classification/efficientnet_b0/ixrt/python/inference.py @@ -85,6 +85,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -106,7 +107,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = 
{acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/efficientnet_b0/ixrt/requirements.txt b/models/cv/classification/efficientnet_b0/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..46a30ba219c99614c9dea617a334954704f70c91 --- /dev/null +++ b/models/cv/classification/efficientnet_b0/ixrt/requirements.txt @@ -0,0 +1,7 @@ +tqdm +onnx +onnxsim +tabulate +ppq==0.6.6 +protobuf==3.20.3 +pycuda \ No newline at end of file diff --git a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_accuracy.sh b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_accuracy.sh index a8ca6d11ff45cf22e116e9833448792e2b2c019d..f184e30d2aafe766cedc44c2daf67c12bb39e44b 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_accuracy.sh +++ b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_accuracy.sh @@ -14,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. batchsize=32 -model_path="efficientnet-b0" +model_path="efficientnet_b0" # model_path="resnet18" datasets_path=${DATASETS_DIR} diff --git a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_performance.sh b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_performance.sh index ca523b2ff17f262c25d1d41c1fbd284d6d59b2c6..458d91367021f7ed980748f559625c8a1c91baff 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_performance.sh +++ b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_fp16_performance.sh @@ -14,14 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. batchsize=32 -model_path="efficientnet-b0" +model_path="efficientnet_b0" # model_path="resnet18" datasets_path=${DATASETS_DIR} -# create onnx -python3 python/export_onnx.py \ - --output_model ${model_path}.onnx - # change batchsize python3 python/modify_batchsize.py \ --batch_size ${batchsize} \ diff --git a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_accuracy.sh b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_accuracy.sh index 5f1c4827f690b6f42df059b745f24e05f75149fe..82517f705e7ae42da3147e5d9677eecfe145ad2d 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_accuracy.sh +++ b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_accuracy.sh @@ -14,14 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. 
batchsize=32 -model_path="efficientnet-b0" +model_path="efficientnet_b0" # model_path="resnet18" datasets_path=${DATASETS_DIR} -# create onnx -python3 python/export_onnx.py \ - --output_model ${model_path}.onnx - # change batchsize python3 python/modify_batchsize.py \ --batch_size ${batchsize} \ diff --git a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_performance.sh b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_performance.sh index 9c1e159f8085490c1578b2eddc45f742957d99c1..4a40c03038f747b6bd81c13e321f15b3af1e0577 100644 --- a/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_performance.sh +++ b/models/cv/classification/efficientnet_b0/ixrt/scripts/infer_efficientnet_b0_int8_performance.sh @@ -14,14 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. batchsize=32 -model_path="efficientnet-b0" +model_path="efficientnet_b0" # model_path="resnet18" datasets_path=${DATASETS_DIR} -# create onnx -python3 python/export_onnx.py \ - --output_model ${model_path}.onnx - # change batchsize python3 python/modify_batchsize.py \ --batch_size ${batchsize} \ diff --git a/models/cv/classification/efficientnet_v2/ixrt/README.md b/models/cv/classification/efficientnet_v2/ixrt/README.md index 23c83cf58a8d78876a41df134db1af58c9addef4..ee9197497d5d996aaa532cfc0961674b856b37e6 100755 --- a/models/cv/classification/efficientnet_v2/ixrt/README.md +++ b/models/cv/classification/efficientnet_v2/ixrt/README.md @@ -15,14 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate -pip3 install timm -pip3 install ppq -pip3 install pycuda -pip3 install protobuf==3.20.0 +pip3 install -r requirements.txt ``` ### Download @@ -35,15 +28,15 @@ Dataset: to download the validation dat ```bash mkdir checkpoints -git clone https://github.com/huggingface/pytorch-image-models.git && git checkout -cp /Path/to/ixrt/export_onnx.py pytorch-image-models/timm/models -cd pytorch-image-models/timm/models -rm _builder.py -mv /Path/ixrt/_builder.py pytorch-image-models/timm/models +git clone https://github.com/huggingface/pytorch-image-models.git +cp ./export_onnx.py pytorch-image-models/timm/models +rm pytorch-image-models/timm/models/_builder.py +mv ./_builder.py pytorch-image-models/timm/models cd pytorch-image-models/timm mkdir -p /root/.cache/torch/hub/checkpoints/ wget -P /root/.cache/torch/hub/checkpoints/ https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth -python3 -m models.export_onnx --output_model ../../checkpoints/efficientnet.onnx +python3 -m models.export_onnx --output_model ../../checkpoints/efficientnet_v2.onnx +cd ../../ ``` ## Inference @@ -53,7 +46,7 @@ export PROJ_DIR=/Path/to/efficientnet_v2/ixrt export DATASETS_DIR=/path/to/imagenet_val/ export CHECKPOINTS_DIR=./checkpoints export RUN_DIR=/Path/to/efficientnet_v2/ixrt -export CONFIG_DIR=/Path/to/config/EFFICIENTNET_V2T_CONFIG +export CONFIG_DIR=/Path/to/config/EFFICIENTNET_V2_CONFIG export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ``` @@ -61,18 +54,18 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ```bash # Accuracy -bash scripts/infer_efficientnet_fp16_accuracy.sh +bash scripts/infer_efficientnet_v2_fp16_accuracy.sh # Performance -bash scripts/infer_efficientnet_fp16_performance.sh +bash scripts/infer_efficientnet_v2_fp16_performance.sh ``` 
### INT8 ```bash # Accuracy -bash scripts/infer_efficientnet_int8_accuracy.sh +bash scripts/infer_efficientnet_v2_int8_accuracy.sh # Performance -bash scripts/infer_efficientnet_int8_performance.sh +bash scripts/infer_efficientnet_v2_int8_performance.sh ``` ## Results diff --git a/models/cv/classification/efficientnet_v2/ixrt/config/EFFICIENTNET_V2T_CONFIG b/models/cv/classification/efficientnet_v2/ixrt/config/EFFICIENTNET_V2_CONFIG similarity index 95% rename from models/cv/classification/efficientnet_v2/ixrt/config/EFFICIENTNET_V2T_CONFIG rename to models/cv/classification/efficientnet_v2/ixrt/config/EFFICIENTNET_V2_CONFIG index b9e40159818f0dec5fbffff0487b049dea9435ae..6979f31db4b15ffd9b3f41d05f20e4394ded8178 100644 --- a/models/cv/classification/efficientnet_v2/ixrt/config/EFFICIENTNET_V2T_CONFIG +++ b/models/cv/classification/efficientnet_v2/ixrt/config/EFFICIENTNET_V2_CONFIG @@ -17,8 +17,8 @@ # MODEL_NAME : 生成onnx/engine的basename # ORIGINE_MODEL : 原始onnx文件名称 IMGSIZE=288 -MODEL_NAME=EfficientNetv2_t -ORIGINE_MODEL=efficientnet.onnx +MODEL_NAME=efficientnet_v2 +ORIGINE_MODEL=efficientnet_v2.onnx # QUANT CONFIG (仅PRECISION为int8时生效) # QUANT_OBSERVER : 量化策略,可选 [hist_percentile, percentile, minmax, entropy, ema] diff --git a/models/cv/classification/efficientnet_v2/ixrt/inference.py b/models/cv/classification/efficientnet_v2/ixrt/inference.py index 62ec18b30cd51167fb8d7f2babc01430511ead3f..fcca27a3a2426d8601100a6b6786e94265799d9f 100644 --- a/models/cv/classification/efficientnet_v2/ixrt/inference.py +++ b/models/cv/classification/efficientnet_v2/ixrt/inference.py @@ -85,6 +85,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -106,7 +107,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/efficientnet_v2/ixrt/requirements.txt b/models/cv/classification/efficientnet_v2/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..09e8dab9fbba81751d8862b1de0cffdbd1c1059b --- /dev/null +++ b/models/cv/classification/efficientnet_v2/ixrt/requirements.txt @@ -0,0 +1,8 @@ +tqdm +onnx +onnxsim +tabulate +timm==1.0.10 +ppq +pycuda +protobuf==3.20.0 \ No newline at end of file diff --git a/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_fp16_accuracy.sh b/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_fp16_accuracy.sh old mode 100755 new mode 100644 similarity index 100% rename from models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_fp16_accuracy.sh rename to models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_fp16_accuracy.sh diff --git a/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_fp16_performance.sh b/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_fp16_performance.sh old mode 100755 new mode 100644 similarity index 100% rename from 
models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_fp16_performance.sh rename to models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_fp16_performance.sh diff --git a/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_int8_accuracy.sh b/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_int8_accuracy.sh old mode 100755 new mode 100644 similarity index 100% rename from models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_int8_accuracy.sh rename to models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_int8_accuracy.sh diff --git a/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_int8_performance.sh b/models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_int8_performance.sh old mode 100755 new mode 100644 similarity index 100% rename from models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_int8_performance.sh rename to models/cv/classification/efficientnet_v2/ixrt/scripts/infer_efficientnet_v2_int8_performance.sh diff --git a/models/cv/classification/googlenet/ixrt/README.md b/models/cv/classification/googlenet/ixrt/README.md index 001319b607136a885c3fd6b6394d0a4d2bbffd81..fa96b4759f7c863ba0c6d3a59e96c1629f59c690 100644 --- a/models/cv/classification/googlenet/ixrt/README.md +++ b/models/cv/classification/googlenet/ixrt/README.md @@ -15,10 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate +pip3 install -r requirements.txt ``` ### Download diff --git a/models/cv/classification/googlenet/ixrt/inference.py b/models/cv/classification/googlenet/ixrt/inference.py index 2c9dcb3f9cc5b9a26903651a31fafa16d8f0db31..50aafd4fd5ef9664203cdcbdfbdb577edca933c4 100644 --- a/models/cv/classification/googlenet/ixrt/inference.py +++ b/models/cv/classification/googlenet/ixrt/inference.py @@ -83,6 +83,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -104,7 +105,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/googlenet/ixrt/requirements.txt b/models/cv/classification/googlenet/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e1eda59c3910ca96c73128bab86d534dbd55bbae --- /dev/null +++ b/models/cv/classification/googlenet/ixrt/requirements.txt @@ -0,0 +1,4 @@ +tqdm +onnx +onnxsim +tabulate \ No newline at end of file diff --git a/models/cv/classification/inception_v3/ixrt/README.md b/models/cv/classification/inception_v3/ixrt/README.md index 523fa268d3fd4d4272f27a6c8697c2bb78c3a09b..4fbbe94f695cb00a1a467ad39036a620ec4b93ed 100755 --- a/models/cv/classification/inception_v3/ixrt/README.md +++ b/models/cv/classification/inception_v3/ixrt/README.md @@ -15,13 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install pycuda 
-pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate -pip3 install ppq -pip3 install protobuf==3.20.0 +pip3 install -r requirements.txt ``` ### Download @@ -35,7 +29,7 @@ Dataset: to download the validation dat ```bash mkdir checkpoints -python3 export.py --weight inception_v3_google-0cc3c7bd.pth --output checkpoints/inception-v3.onnx +python3 export.py --weight inception_v3_google-0cc3c7bd.pth --output checkpoints/inception_v3.onnx ``` ## Inference diff --git a/models/cv/classification/inception_v3/ixrt/config/INCEPTION_V3_CONFIG b/models/cv/classification/inception_v3/ixrt/config/INCEPTION_V3_CONFIG index f5223eaf824040a2ecad2a0759925132aa82bb0d..041b09103ee4fec17959afb18887ee595e7b74b3 100644 --- a/models/cv/classification/inception_v3/ixrt/config/INCEPTION_V3_CONFIG +++ b/models/cv/classification/inception_v3/ixrt/config/INCEPTION_V3_CONFIG @@ -18,7 +18,7 @@ # ORIGINE_MODEL : 原始onnx文件名称 IMGSIZE=224 MODEL_NAME=Inception_v3 -ORIGINE_MODEL=inception-v3.onnx +ORIGINE_MODEL=inception_v3.onnx # QUANT CONFIG (仅PRECISION为int8时生效) # QUANT_OBSERVER : 量化策略,可选 [hist_percentile, percentile, minmax, entropy, ema] diff --git a/models/cv/classification/inception_v3/ixrt/inference.py b/models/cv/classification/inception_v3/ixrt/inference.py index 62ec18b30cd51167fb8d7f2babc01430511ead3f..fcca27a3a2426d8601100a6b6786e94265799d9f 100644 --- a/models/cv/classification/inception_v3/ixrt/inference.py +++ b/models/cv/classification/inception_v3/ixrt/inference.py @@ -85,6 +85,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -106,7 +107,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/inception_v3/ixrt/requirements.txt b/models/cv/classification/inception_v3/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..bb2683ddc5fb6d26debebe20d8dbd0b5a3c6c3f1 --- /dev/null +++ b/models/cv/classification/inception_v3/ixrt/requirements.txt @@ -0,0 +1,7 @@ +pycuda +tqdm +onnx +onnxsim +tabulate +ppq +protobuf==3.20.0 \ No newline at end of file diff --git a/models/cv/classification/mobilenet_v2/ixrt/README.md b/models/cv/classification/mobilenet_v2/ixrt/README.md index d3a375f8b5a38da49481c3adf23195027b32ae68..65e27ec41ebd829ac074c315227a68a3d5b463a1 100644 --- a/models/cv/classification/mobilenet_v2/ixrt/README.md +++ b/models/cv/classification/mobilenet_v2/ixrt/README.md @@ -9,23 +9,34 @@ The MobileNetV2 architecture is based on an inverted residual structure where th ### Install ```bash -pip3 install tqdm -pip3 install onnxsim -pip3 install opencv-python -pip3 install ppq -pip3 install protobuf==3.20.0 +pip3 install -r requirements.txt ``` ### Download -Download the [imagenet](https://www.image-net.org/download.php) validation dataset, and place in `${PROJ_ROOT}/data/datasets`; +Pretrained model: + +Download the [imagenet](https://www.image-net.org/download.php) to download the validation dataset. 
+ +### Model Conversion + +```bash +mkdir checkpoints +python3 python/export_onnx.py --origin_model /path/to/mobilenet_v2-b0353104 --output_model checkpoints/mobilenet_v2.onnx +``` ## Inference +```bash +export PROJ_DIR=./ +export DATASETS_DIR=/path/to/imagenet_val/ +export CHECKPOINTS_DIR=./checkpoints +export RUN_DIR=./ +``` + ### FP16 ```bash -cd python/ # Test ACC bash script/infer_mobilenetv2_fp16_accuracy.sh # Test FPS diff --git a/models/cv/classification/mobilenet_v2/ixrt/python/export_onnx.py b/models/cv/classification/mobilenet_v2/ixrt/python/export_onnx.py index 8351ffc8c89820b109aa0e09a0bc43104017fb29..4881a4c1793bf2da5ffbe86240f145b3387bd984 100644 --- a/models/cv/classification/mobilenet_v2/ixrt/python/export_onnx.py +++ b/models/cv/classification/mobilenet_v2/ixrt/python/export_onnx.py @@ -20,13 +20,15 @@ import argparse def parse_args(): parser = argparse.ArgumentParser() + parser.add_argument("--origin_model", type=str) parser.add_argument("--output_model", type=str) args = parser.parse_args() return args args = parse_args() -model = models.mobilenet_v2(pretrained=True) +model = models.mobilenet_v2() +model.load_state_dict(torch.load(args.origin_model)) model.cuda() model.eval() inputx = torch.randn(1, 3, 224, 224, device='cuda') @@ -42,3 +44,4 @@ torch.onnx.export(model, input_names = ['input'], output_names = ['output'],) print(f"Convert onnx model in {export_onnx_file}") +exit() diff --git a/models/cv/classification/mobilenet_v2/ixrt/requirements.txt b/models/cv/classification/mobilenet_v2/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..a6b325a766ce064d35b417190ddff6e23f1efe50 --- /dev/null +++ b/models/cv/classification/mobilenet_v2/ixrt/requirements.txt @@ -0,0 +1,5 @@ +tqdm +onnxsim +opencv-python +ppq +protobuf==3.20.0 \ No newline at end of file diff --git a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_fp16_accuracy.sh b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_fp16_accuracy.sh similarity index 80% rename from models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_fp16_accuracy.sh rename to models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_fp16_accuracy.sh index 93c3cda41bb1467bd020b127e39fdb4cc5a9d905..f3e4828de462dc6da18c16408d58ba56b679fc72 100644 --- a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_fp16_accuracy.sh +++ b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_fp16_accuracy.sh @@ -44,18 +44,18 @@ do esac done -RUN_DIR=$(cd $(dirname $0); cd ../; pwd) -PROJ_DIR=$(cd $(dirname $0); cd ../../../../../../../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/datasets/imagenet_val" -CHECKPOINTS_DIR="${PROJ_DIR}/data/checkpoints/mobilenetv2" +RUN_DIR=${RUN_DIR} +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} if [ ! 
-d $CHECKPOINTS_DIR ]; then mkdir -p $CHECKPOINTS_DIR fi -MODEL_NAME="mobilenetv2" -ORIGINE_MODEL="${CHECKPOINTS_DIR}/raw_mobilenetv2.onnx" +MODEL_NAME="mobilenet_v2" +ORIGINE_MODEL="${CHECKPOINTS_DIR}/mobilenet_v2.onnx" echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} @@ -69,14 +69,6 @@ step=0 # Export Onnx Model let step++ echo; -echo [STEP ${step}] : Export Onnx Model -if [ -f ${ORIGINE_MODEL} ];then - echo " "Onnx Model, ${ORIGINE_MODEL} has been existed -else - python3 ${RUN_DIR}/export_onnx.py \ - --output_model $ORIGINE_MODEL - echo " "Generate ${ORIGINE_MODEL} -fi SIM_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_sim.onnx # Simplify Model @@ -86,7 +78,7 @@ echo [STEP ${step}] : Simplify Model if [ -f ${SIM_MODEL} ];then echo " "Simplify Model, ${SIM_MODEL} has been existed else - python3 ${RUN_DIR}/simplify_model.py \ + python3 ${RUN_DIR}python/simplify_model.py \ --origin_model $ORIGINE_MODEL \ --output_model ${SIM_MODEL} echo " "Generate ${SIM_MODEL} @@ -100,7 +92,7 @@ FINAL_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_${BSZ}.onnx if [ -f $FINAL_MODEL ];then echo " "Change Batchsize Skip, $FINAL_MODEL has been existed else - python3 ${RUN_DIR}/modify_batchsize.py \ + python3 ${RUN_DIR}python/modify_batchsize.py \ --batch_size ${BSZ} \ --origin_model ${SIM_MODEL} \ --output_model ${FINAL_MODEL} @@ -115,7 +107,7 @@ ENGINE_FILE=${CHECKPOINTS_DIR}/${MODEL_NAME}_${PRECISION}_bs${BSZ}.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else - python3 ${RUN_DIR}/build_engine.py \ + python3 ${RUN_DIR}python/build_engine.py \ --precision ${PRECISION} \ --model ${FINAL_MODEL} \ --engine ${ENGINE_FILE} @@ -126,7 +118,7 @@ fi let step++ echo; echo [STEP ${step}] : Inference -python3 ${RUN_DIR}/inference.py \ +python3 ${RUN_DIR}python/inference.py \ --engine_file=${ENGINE_FILE} \ --datasets_dir=${DATASETS_DIR} \ --imgsz=${IMGSIZE} \ diff --git a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_fp16_performance.sh b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_fp16_performance.sh similarity index 80% rename from models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_fp16_performance.sh rename to models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_fp16_performance.sh index 1042fb91f244ce59509baa56381f91ae609369b8..1057dadb582ac362146e6dc6fd72654a556f371d 100644 --- a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_fp16_performance.sh +++ b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_fp16_performance.sh @@ -44,18 +44,18 @@ do esac done -RUN_DIR=$(cd $(dirname $0); cd ../; pwd) -PROJ_DIR=$(cd $(dirname $0); cd ../../../../../../../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/datasets/imagenet_val" -CHECKPOINTS_DIR="${PROJ_DIR}/data/checkpoints/mobilenetv2" +RUN_DIR=${RUN_DIR} +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} if [ ! 
-d $CHECKPOINTS_DIR ]; then mkdir -p $CHECKPOINTS_DIR fi -MODEL_NAME="mobilenetv2" -ORIGINE_MODEL="${CHECKPOINTS_DIR}/raw_mobilenetv2.onnx" +MODEL_NAME="mobilenet_v2" +ORIGINE_MODEL="${CHECKPOINTS_DIR}/mobilenet_v2.onnx" echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} @@ -69,14 +69,6 @@ step=0 # Export Onnx Model let step++ echo; -echo [STEP ${step}] : Export Onnx Model -if [ -f ${ORIGINE_MODEL} ];then - echo " "Onnx Model, ${ORIGINE_MODEL} has been existed -else - python3 ${RUN_DIR}/export_onnx.py \ - --output_model $ORIGINE_MODEL - echo " "Generate ${ORIGINE_MODEL} -fi SIM_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_sim.onnx # Simplify Model @@ -86,7 +78,7 @@ echo [STEP ${step}] : Simplify Model if [ -f ${SIM_MODEL} ];then echo " "Simplify Model, ${SIM_MODEL} has been existed else - python3 ${RUN_DIR}/simplify_model.py \ + python3 ${RUN_DIR}python/simplify_model.py \ --origin_model $ORIGINE_MODEL \ --output_model ${SIM_MODEL} echo " "Generate ${SIM_MODEL} @@ -100,7 +92,7 @@ FINAL_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_${BSZ}.onnx if [ -f $FINAL_MODEL ];then echo " "Change Batchsize Skip, $FINAL_MODEL has been existed else - python3 ${RUN_DIR}/modify_batchsize.py \ + python3 ${RUN_DIR}python/modify_batchsize.py \ --batch_size ${BSZ} \ --origin_model ${SIM_MODEL} \ --output_model ${FINAL_MODEL} @@ -115,7 +107,7 @@ ENGINE_FILE=${CHECKPOINTS_DIR}/${MODEL_NAME}_${PRECISION}_bs${BSZ}.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else - python3 ${RUN_DIR}/build_engine.py \ + python3 ${RUN_DIR}python/build_engine.py \ --precision ${PRECISION} \ --model ${FINAL_MODEL} \ --engine ${ENGINE_FILE} @@ -126,7 +118,7 @@ fi let step++ echo; echo [STEP ${step}] : Inference -python3 ${RUN_DIR}/inference.py \ +python3 ${RUN_DIR}python/inference.py \ --engine_file=${ENGINE_FILE} \ --datasets_dir=${DATASETS_DIR} \ --imgsz=${IMGSIZE} \ diff --git a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_int8_accuracy.sh b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_int8_accuracy.sh similarity index 82% rename from models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_int8_accuracy.sh rename to models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_int8_accuracy.sh index 7c32c7892a7867574380ee53311637cc15f4ce61..95653d9ff43497fcb5d994f51218588963733b0f 100644 --- a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_int8_accuracy.sh +++ b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_int8_accuracy.sh @@ -44,18 +44,18 @@ do esac done -RUN_DIR=$(cd $(dirname $0); cd ../; pwd) -PROJ_DIR=$(cd $(dirname $0); cd ../../../../../../../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/datasets/imagenet_val" -CHECKPOINTS_DIR="${PROJ_DIR}/data/checkpoints/mobilenetv2" +RUN_DIR=${RUN_DIR} +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} if [ ! 
-d $CHECKPOINTS_DIR ]; then mkdir -p $CHECKPOINTS_DIR fi -MODEL_NAME="mobilenetv2" -ORIGINE_MODEL="${CHECKPOINTS_DIR}/raw_mobilenetv2.onnx" +MODEL_NAME="mobilenet_v2" +ORIGINE_MODEL="${CHECKPOINTS_DIR}/mobilenet_v2.onnx" echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} @@ -69,14 +69,6 @@ step=0 # Export Onnx Model let step++ echo; -echo [STEP ${step}] : Export Onnx Model -if [ -f ${ORIGINE_MODEL} ];then - echo " "Onnx Model, ${ORIGINE_MODEL} has been existed -else - python3 ${RUN_DIR}/export_onnx.py \ - --output_model $ORIGINE_MODEL - echo " "Generate ${ORIGINE_MODEL} -fi SIM_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_sim.onnx # Simplify Model @@ -86,7 +78,7 @@ echo [STEP ${step}] : Simplify Model if [ -f ${SIM_MODEL} ];then echo " "Simplify Model, ${SIM_MODEL} has been existed else - python3 ${RUN_DIR}/simplify_model.py \ + python3 ${RUN_DIR}python/simplify_model.py \ --origin_model $ORIGINE_MODEL \ --output_model ${SIM_MODEL} echo " "Generate ${SIM_MODEL} @@ -100,7 +92,7 @@ FINAL_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_${BSZ}.onnx if [ -f $FINAL_MODEL ];then echo " "Change Batchsize Skip, $FINAL_MODEL has been existed else - python3 ${RUN_DIR}/modify_batchsize.py \ + python3 ${RUN_DIR}python/modify_batchsize.py \ --batch_size ${BSZ} \ --origin_model ${SIM_MODEL} \ --output_model ${FINAL_MODEL} @@ -116,7 +108,7 @@ QUANTIZED_Q_PARAMS_JSON=${CHECKPOINTS_DIR}/quant_cfg.json if [ -f $QUANTIZED_MODEL ];then echo " "Quantized Model Skip By PPQ, $QUANTIZED_MODEL has been existed else - python3 ${RUN_DIR}/quant.py \ + python3 ${RUN_DIR}python/quant.py \ --model_name ${MODEL_NAME} \ --model ${FINAL_MODEL} \ --dataset_dir ${DATASETS_DIR} \ @@ -132,7 +124,7 @@ ENGINE_FILE=${CHECKPOINTS_DIR}/${MODEL_NAME}_${PRECISION}_bs${BSZ}.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else - python3 ${RUN_DIR}/build_engine_by_write_qparams.py \ + python3 ${RUN_DIR}python/build_engine_by_write_qparams.py \ --onnx ${QUANTIZED_MODEL} \ --qparam_json ${QUANTIZED_Q_PARAMS_JSON} \ --engine ${ENGINE_FILE} @@ -143,7 +135,7 @@ fi let step++ echo; echo [STEP ${step}] : Inference -python3 ${RUN_DIR}/inference.py \ +python3 ${RUN_DIR}python/inference.py \ --engine_file=${ENGINE_FILE} \ --datasets_dir=${DATASETS_DIR} \ --imgsz=${IMGSIZE} \ diff --git a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_int8_performance.sh b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_int8_performance.sh similarity index 82% rename from models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_int8_performance.sh rename to models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_int8_performance.sh index 196a5090411f3427cdccad1b39eabd8af6206379..739f62cbed80281cca4f1ab811468d59bb200c0f 100644 --- a/models/cv/classification/mobilenet_v2/ixrt/python/script/infer_mobilenetv2_int8_performance.sh +++ b/models/cv/classification/mobilenet_v2/ixrt/script/infer_mobilenetv2_int8_performance.sh @@ -44,18 +44,18 @@ do esac done -RUN_DIR=$(cd $(dirname $0); cd ../; pwd) -PROJ_DIR=$(cd $(dirname $0); cd ../../../../../../../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/datasets/imagenet_val" -CHECKPOINTS_DIR="${PROJ_DIR}/data/checkpoints/mobilenetv2" +RUN_DIR=${RUN_DIR} +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} if [ ! 
-d $CHECKPOINTS_DIR ]; then mkdir -p $CHECKPOINTS_DIR fi -MODEL_NAME="mobilenetv2" -ORIGINE_MODEL="${CHECKPOINTS_DIR}/raw_mobilenetv2.onnx" +MODEL_NAME="mobilenet_v2" +ORIGINE_MODEL="${CHECKPOINTS_DIR}/mobilenet_v2.onnx" echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} @@ -69,14 +69,6 @@ step=0 # Export Onnx Model let step++ echo; -echo [STEP ${step}] : Export Onnx Model -if [ -f ${ORIGINE_MODEL} ];then - echo " "Onnx Model, ${ORIGINE_MODEL} has been existed -else - python3 ${RUN_DIR}/export_onnx.py \ - --output_model $ORIGINE_MODEL - echo " "Generate ${ORIGINE_MODEL} -fi SIM_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_sim.onnx # Simplify Model @@ -86,7 +78,7 @@ echo [STEP ${step}] : Simplify Model if [ -f ${SIM_MODEL} ];then echo " "Simplify Model, ${SIM_MODEL} has been existed else - python3 ${RUN_DIR}/simplify_model.py \ + python3 ${RUN_DIR}python/simplify_model.py \ --origin_model $ORIGINE_MODEL \ --output_model ${SIM_MODEL} echo " "Generate ${SIM_MODEL} @@ -100,7 +92,7 @@ FINAL_MODEL=${CHECKPOINTS_DIR}/${MODEL_NAME}_${BSZ}.onnx if [ -f $FINAL_MODEL ];then echo " "Change Batchsize Skip, $FINAL_MODEL has been existed else - python3 ${RUN_DIR}/modify_batchsize.py \ + python3 ${RUN_DIR}python/modify_batchsize.py \ --batch_size ${BSZ} \ --origin_model ${SIM_MODEL} \ --output_model ${FINAL_MODEL} @@ -116,7 +108,7 @@ QUANTIZED_Q_PARAMS_JSON=${CHECKPOINTS_DIR}/quant_cfg.json if [ -f $QUANTIZED_MODEL ];then echo " "Quantized Model Skip By PPQ, $QUANTIZED_MODEL has been existed else - python3 ${RUN_DIR}/quant.py \ + python3 ${RUN_DIR}python/quant.py \ --model_name ${MODEL_NAME} \ --model ${FINAL_MODEL} \ --dataset_dir ${DATASETS_DIR} \ @@ -132,7 +124,7 @@ ENGINE_FILE=${CHECKPOINTS_DIR}/${MODEL_NAME}_${PRECISION}_bs${BSZ}.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else - python3 ${RUN_DIR}/build_engine_by_write_qparams.py \ + python3 ${RUN_DIR}python/build_engine_by_write_qparams.py \ --onnx ${QUANTIZED_MODEL} \ --qparam_json ${QUANTIZED_Q_PARAMS_JSON} \ --engine ${ENGINE_FILE} @@ -143,7 +135,7 @@ fi let step++ echo; echo [STEP ${step}] : Inference -python3 ${RUN_DIR}/inference.py \ +python3 ${RUN_DIR}python/inference.py \ --engine_file=${ENGINE_FILE} \ --datasets_dir=${DATASETS_DIR} \ --imgsz=${IMGSIZE} \ diff --git a/models/cv/classification/mobilenet_v3/ixrt/README.md b/models/cv/classification/mobilenet_v3/ixrt/README.md index b1ef8f3dd97b24c8ea0ed9b426434446a2fdf68d..1ff5e158f9764e81c2977df60ef76b1bf6432c42 100644 --- a/models/cv/classification/mobilenet_v3/ixrt/README.md +++ b/models/cv/classification/mobilenet_v3/ixrt/README.md @@ -15,10 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate +pip3 install -r requirements.txt ``` ### Download @@ -31,7 +28,7 @@ Dataset: to download the validation dat ```bash mkdir checkpoints -python3 export_onnx.py --origin_model /path/to/mobilenet_v3_small-047dcff4.pth --output_model checkpoints/mobilenetv3.onnx +python3 export_onnx.py --origin_model /path/to/mobilenet_v3_small-047dcff4.pth --output_model checkpoints/mobilenet_v3.onnx ``` ## Inference @@ -41,16 +38,16 @@ export PROJ_DIR=./ export DATASETS_DIR=/path/to/imagenet_val/ export CHECKPOINTS_DIR=./checkpoints export RUN_DIR=./ -export CONFIG_DIR=config/MOBILENETV3_CONFIG +export CONFIG_DIR=config/MOBILENET_V3_CONFIG ``` ### FP16 ```bash # Accuracy -bash scripts/infer_mobilenetv3_fp16_accuracy.sh +bash 
scripts/infer_mobilenet_v3_fp16_accuracy.sh # Performance -bash scripts/infer_mobilenetv3_fp16_performance.sh +bash scripts/infer_mobilenet_v3_fp16_performance.sh ``` ## Results diff --git a/models/cv/classification/mobilenet_v3/ixrt/config/MOBILENETV3_CONFIG b/models/cv/classification/mobilenet_v3/ixrt/config/MOBILENET_V3_CONFIG similarity index 97% rename from models/cv/classification/mobilenet_v3/ixrt/config/MOBILENETV3_CONFIG rename to models/cv/classification/mobilenet_v3/ixrt/config/MOBILENET_V3_CONFIG index cd97156638c6e3030e5420cc8355972729b03bf7..aef74a6c84b70973cd498fbd4c4e97f9b99ab7a4 100644 --- a/models/cv/classification/mobilenet_v3/ixrt/config/MOBILENETV3_CONFIG +++ b/models/cv/classification/mobilenet_v3/ixrt/config/MOBILENET_V3_CONFIG @@ -17,7 +17,7 @@ # ORIGINE_MODEL : 原始onnx文件名称 IMGSIZE=224 MODEL_NAME=MobileNet_v3 -ORIGINE_MODEL=mobilenetv3.onnx +ORIGINE_MODEL=mobilenet_v3.onnx # QUANT CONFIG (仅PRECISION为int8时生效) # QUANT_OBSERVER : 量化策略,可选 [hist_percentile, percentile, minmax, entropy, ema] diff --git a/models/cv/classification/mobilenet_v3/ixrt/inference.py b/models/cv/classification/mobilenet_v3/ixrt/inference.py index 2c9dcb3f9cc5b9a26903651a31fafa16d8f0db31..50aafd4fd5ef9664203cdcbdfbdb577edca933c4 100644 --- a/models/cv/classification/mobilenet_v3/ixrt/inference.py +++ b/models/cv/classification/mobilenet_v3/ixrt/inference.py @@ -83,6 +83,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -104,7 +105,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/mobilenet_v3/ixrt/requirements.txt b/models/cv/classification/mobilenet_v3/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e1eda59c3910ca96c73128bab86d534dbd55bbae --- /dev/null +++ b/models/cv/classification/mobilenet_v3/ixrt/requirements.txt @@ -0,0 +1,4 @@ +tqdm +onnx +onnxsim +tabulate \ No newline at end of file diff --git a/models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenetv3_fp16_accuracy.sh b/models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenet_v3_fp16_accuracy.sh similarity index 100% rename from models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenetv3_fp16_accuracy.sh rename to models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenet_v3_fp16_accuracy.sh diff --git a/models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenetv3_fp16_performance.sh b/models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenet_v3_fp16_performance.sh similarity index 100% rename from models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenetv3_fp16_performance.sh rename to models/cv/classification/mobilenet_v3/ixrt/scripts/infer_mobilenet_v3_fp16_performance.sh diff --git a/models/cv/classification/res2net50/ixrt/README.md b/models/cv/classification/res2net50/ixrt/README.md index 9c381613818d34de041ad5725b69e6291623c734..1ec7a25194062661cc526a773ca7755aac493b66 100644 --- 
a/models/cv/classification/res2net50/ixrt/README.md +++ b/models/cv/classification/res2net50/ixrt/README.md @@ -15,21 +15,20 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate +pip3 install -r requirements.txt ``` ### Download +Pretrained model: + Dataset: to download the validation dataset. ### Model Conversion ```bash mkdir checkpoints -python3 export_onnx.py --output_model checkpoints/res2net50.onnx +python3 export_onnx.py --origin_model /path/to/res2net50_14w_8s-6527dddc.pth --output_model checkpoints/res2net50.onnx ``` ## Inference diff --git a/models/cv/classification/res2net50/ixrt/export_onnx.py b/models/cv/classification/res2net50/ixrt/export_onnx.py index c85685e5bf3f9eb0540cae29d49d742e529e6ed9..ebc4a2f2efc260f94a50ee1ff83eca33268a3709 100644 --- a/models/cv/classification/res2net50/ixrt/export_onnx.py +++ b/models/cv/classification/res2net50/ixrt/export_onnx.py @@ -164,25 +164,17 @@ class Res2Net(nn.Module): return x -def res2net50_14w_8s(pretrained=False, **kwargs): - """Constructs a Res2Net-50_14w_8s model. - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 14, scale = 8, **kwargs) - if pretrained: - model.load_state_dict(model_zoo.load_url(model_urls['res2net50_14w_8s'])) - return model - def parse_args(): parser = argparse.ArgumentParser() + parser.add_argument("--origin_model", type=str) parser.add_argument("--output_model", type=str) args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() - model = res2net50_14w_8s(pretrained=True) + model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth = 14, scale = 8) + model.load_state_dict(torch.load(args.origin_model)) model.cuda() model.eval() input = torch.randn(32, 3, 224, 224, device='cuda') diff --git a/models/cv/classification/res2net50/ixrt/inference.py b/models/cv/classification/res2net50/ixrt/inference.py index 2c9dcb3f9cc5b9a26903651a31fafa16d8f0db31..50aafd4fd5ef9664203cdcbdfbdb577edca933c4 100644 --- a/models/cv/classification/res2net50/ixrt/inference.py +++ b/models/cv/classification/res2net50/ixrt/inference.py @@ -83,6 +83,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -104,7 +105,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/res2net50/ixrt/requirements.txt b/models/cv/classification/res2net50/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e1eda59c3910ca96c73128bab86d534dbd55bbae --- /dev/null +++ b/models/cv/classification/res2net50/ixrt/requirements.txt @@ -0,0 +1,4 @@ +tqdm +onnx +onnxsim +tabulate \ No newline at end of file diff --git a/models/cv/classification/resnet18/ixrt/README.md b/models/cv/classification/resnet18/ixrt/README.md index f78e7696ef5e69055a2f58fab41192958f72e419..71857e6566130c3daf7787a1314ecc1e947205e3 100644 
--- a/models/cv/classification/resnet18/ixrt/README.md +++ b/models/cv/classification/resnet18/ixrt/README.md @@ -15,21 +15,20 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate +pip3 install -r requirements.txt ``` ### Download +Pretrained model: + Dataset: to download the validation dataset. ### Model Conversion ```bash mkdir checkpoints -python3 export_onnx.py --output_model checkpoints/resnet18.onnx +python3 export_onnx.py --origin_model /path/to/resnet18-f37072fd.pth --output_model checkpoints/resnet18.onnx ``` ## Inference diff --git a/models/cv/classification/resnet18/ixrt/export_onnx.py b/models/cv/classification/resnet18/ixrt/export_onnx.py index 3f963af607436a492d204478ab86d2183600e85e..708a205ad78f86c0bad535875eb4c1e65f560f09 100644 --- a/models/cv/classification/resnet18/ixrt/export_onnx.py +++ b/models/cv/classification/resnet18/ixrt/export_onnx.py @@ -18,12 +18,14 @@ import argparse def parse_args(): parser = argparse.ArgumentParser() + parser.add_argument("--origin_model", type=str) parser.add_argument("--output_model", type=str) args = parser.parse_args() return args args = parse_args() -model = models.resnet18(pretrained=True) +model = models.resnet18() +model.load_state_dict(torch.load(args.origin_model)) model.cuda() model.eval() input = torch.randn(1, 3, 224, 224, device='cuda') diff --git a/models/cv/classification/resnet18/ixrt/inference.py b/models/cv/classification/resnet18/ixrt/inference.py index 1ec56b4a1f09ee4bd7516461f758ac121a5346a0..4e178df4059e16e61daf0644862f27dd5fe99fbe 100644 --- a/models/cv/classification/resnet18/ixrt/inference.py +++ b/models/cv/classification/resnet18/ixrt/inference.py @@ -84,6 +84,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -105,7 +106,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/resnet18/ixrt/requirements.txt b/models/cv/classification/resnet18/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e1eda59c3910ca96c73128bab86d534dbd55bbae --- /dev/null +++ b/models/cv/classification/resnet18/ixrt/requirements.txt @@ -0,0 +1,4 @@ +tqdm +onnx +onnxsim +tabulate \ No newline at end of file diff --git a/models/cv/classification/shufflenet_v1/ixrt/README.md b/models/cv/classification/shufflenet_v1/ixrt/README.md index e29de33a7591d2009687d454676a54b5735d3012..4dee5cae70e0259f6abf2d47d90b6689d3e0b758 100644 --- a/models/cv/classification/shufflenet_v1/ixrt/README.md +++ b/models/cv/classification/shufflenet_v1/ixrt/README.md @@ -16,17 +16,13 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install tabulate -pip3 install onnx -pip3 install onnxsim -pip3 install opencv-python==4.6.0.66 -pip3 install mmcls==0.24.0 -pip3 install mmcv==1.5.3 +pip3 install -r requirements.txt ``` ### Download +Pretrained model: + Dataset: to download 
the validation dataset. ### Model Conversion @@ -39,7 +35,7 @@ cd .. python3 export_onnx.py \ --config_file ./checkpoints/mmpretrain/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py \ - --checkpoint_file https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth \ + --checkpoint_file ./shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth \ --output_model ./checkpoints/shufflenet_v1.onnx ``` diff --git a/models/cv/classification/shufflenet_v1/ixrt/inference.py b/models/cv/classification/shufflenet_v1/ixrt/inference.py index 1ec56b4a1f09ee4bd7516461f758ac121a5346a0..4e178df4059e16e61daf0644862f27dd5fe99fbe 100644 --- a/models/cv/classification/shufflenet_v1/ixrt/inference.py +++ b/models/cv/classification/shufflenet_v1/ixrt/inference.py @@ -84,6 +84,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -105,7 +106,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/shufflenet_v1/ixrt/requirements.txt b/models/cv/classification/shufflenet_v1/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..fdd84a5d3a5ddc461faddea827aa328528ef8f88 --- /dev/null +++ b/models/cv/classification/shufflenet_v1/ixrt/requirements.txt @@ -0,0 +1,7 @@ +tqdm +tabulate +onnx +onnxsim +opencv-python==4.6.0.66 +mmcls==0.24.0 +mmcv==1.5.3 \ No newline at end of file diff --git a/models/cv/classification/squeezenet_1.1/ixrt/README.md b/models/cv/classification/squeezenet_v11/ixrt/README.md similarity index 87% rename from models/cv/classification/squeezenet_1.1/ixrt/README.md rename to models/cv/classification/squeezenet_v11/ixrt/README.md index 08fe037a0c7c3f1037b531c440dc553d43ebdb60..df5b02d4e3e120381f69995591364b1edcb01524 100644 --- a/models/cv/classification/squeezenet_1.1/ixrt/README.md +++ b/models/cv/classification/squeezenet_v11/ixrt/README.md @@ -17,13 +17,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install tabulate -pip3 install ppq -pip3 install pycuda -pip3 install opencv-python==4.6.0.66 +pip3 install -r requirements.txt ``` ### Download @@ -36,7 +30,7 @@ Dataset: to download the validation dat ```bash mkdir checkpoints -python3 export_onnx.py --origin_model /path/to/squeezenet1_1-b8a52dc0.pth --output_model checkpoints/squeezenetv11.onnx +python3 export_onnx.py --origin_model /path/to/squeezenet1_1-b8a52dc0.pth --output_model checkpoints/squeezenet_v11.onnx ``` ## Inference diff --git a/models/cv/classification/squeezenet_1.1/ixrt/build_engine.py b/models/cv/classification/squeezenet_v11/ixrt/build_engine.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/build_engine.py rename to models/cv/classification/squeezenet_v11/ixrt/build_engine.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/build_i8_engine.py 
b/models/cv/classification/squeezenet_v11/ixrt/build_i8_engine.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/build_i8_engine.py rename to models/cv/classification/squeezenet_v11/ixrt/build_i8_engine.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/calibration_dataset.py b/models/cv/classification/squeezenet_v11/ixrt/calibration_dataset.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/calibration_dataset.py rename to models/cv/classification/squeezenet_v11/ixrt/calibration_dataset.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/common.py b/models/cv/classification/squeezenet_v11/ixrt/common.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/common.py rename to models/cv/classification/squeezenet_v11/ixrt/common.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/config/SQUEEZENET_V11_CONFIG b/models/cv/classification/squeezenet_v11/ixrt/config/SQUEEZENET_V11_CONFIG similarity index 95% rename from models/cv/classification/squeezenet_1.1/ixrt/config/SQUEEZENET_V11_CONFIG rename to models/cv/classification/squeezenet_v11/ixrt/config/SQUEEZENET_V11_CONFIG index efc2a0870434a41575d490a17821a8ad816c81b9..19d35c97654c126bf70a5c0eeaabab48b7fe291f 100644 --- a/models/cv/classification/squeezenet_1.1/ixrt/config/SQUEEZENET_V11_CONFIG +++ b/models/cv/classification/squeezenet_v11/ixrt/config/SQUEEZENET_V11_CONFIG @@ -3,7 +3,7 @@ # ORIGINE_MODEL : 原始onnx文件名称 IMGSIZE=224 MODEL_NAME=SqueezeNet_v11 -ORIGINE_MODEL=squeezenetv11.onnx +ORIGINE_MODEL=squeezenet_v11.onnx # QUANT CONFIG (仅PRECISION为int8时生效) # QUANT_OBSERVER : 量化策略,可选 [hist_percentile, percentile, minmax, entropy, ema] diff --git a/models/cv/classification/squeezenet_1.1/ixrt/export_onnx.py b/models/cv/classification/squeezenet_v11/ixrt/export_onnx.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/export_onnx.py rename to models/cv/classification/squeezenet_v11/ixrt/export_onnx.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/inference.py b/models/cv/classification/squeezenet_v11/ixrt/inference.py similarity index 96% rename from models/cv/classification/squeezenet_1.1/ixrt/inference.py rename to models/cv/classification/squeezenet_v11/ixrt/inference.py index 1ec56b4a1f09ee4bd7516461f758ac121a5346a0..4e178df4059e16e61daf0644862f27dd5fe99fbe 100644 --- a/models/cv/classification/squeezenet_1.1/ixrt/inference.py +++ b/models/cv/classification/squeezenet_v11/ixrt/inference.py @@ -84,6 +84,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -105,7 +106,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/squeezenet_1.1/ixrt/modify_batchsize.py b/models/cv/classification/squeezenet_v11/ixrt/modify_batchsize.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/modify_batchsize.py rename to 
models/cv/classification/squeezenet_v11/ixrt/modify_batchsize.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/quant.py b/models/cv/classification/squeezenet_v11/ixrt/quant.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/quant.py rename to models/cv/classification/squeezenet_v11/ixrt/quant.py diff --git a/models/cv/classification/squeezenet_1.1/ixrt/refine_model.py b/models/cv/classification/squeezenet_v11/ixrt/refine_model.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/refine_model.py rename to models/cv/classification/squeezenet_v11/ixrt/refine_model.py diff --git a/models/cv/classification/squeezenet_v11/ixrt/requirements.txt b/models/cv/classification/squeezenet_v11/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e4d443b8e992b2c8b0184ab649efff0ba2f0a458 --- /dev/null +++ b/models/cv/classification/squeezenet_v11/ixrt/requirements.txt @@ -0,0 +1,7 @@ +tqdm +onnx +onnxsim +tabulate +ppq +pycuda +opencv-python==4.6.0.66 \ No newline at end of file diff --git a/models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_fp16_accuracy.sh b/models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_fp16_accuracy.sh similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_fp16_accuracy.sh rename to models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_fp16_accuracy.sh diff --git a/models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_fp16_performance.sh b/models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_fp16_performance.sh similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_fp16_performance.sh rename to models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_fp16_performance.sh diff --git a/models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_int8_accuracy.sh b/models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_int8_accuracy.sh similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_int8_accuracy.sh rename to models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_int8_accuracy.sh diff --git a/models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_int8_performance.sh b/models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_int8_performance.sh similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/scripts/infer_squeezenet_v11_int8_performance.sh rename to models/cv/classification/squeezenet_v11/ixrt/scripts/infer_squeezenet_v11_int8_performance.sh diff --git a/models/cv/classification/squeezenet_1.1/ixrt/simplify_model.py b/models/cv/classification/squeezenet_v11/ixrt/simplify_model.py similarity index 100% rename from models/cv/classification/squeezenet_1.1/ixrt/simplify_model.py rename to models/cv/classification/squeezenet_v11/ixrt/simplify_model.py diff --git a/models/cv/classification/vgg16/ixrt/README.md b/models/cv/classification/vgg16/ixrt/README.md index 370adda08eab7178f22bd98afd3e57eb8dbbb720..789fcc8b3684285c58795ecd92fb93f707817dd2 100644 --- a/models/cv/classification/vgg16/ixrt/README.md +++ b/models/cv/classification/vgg16/ixrt/README.md @@ -16,20 +16,20 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-dev -pip3 install tqdm -pip3 install onnxsim -pip3 
install opencv-python==4.6.0.66 +pip3 install -r requirements.txt ``` ### Download +Pretrained model: + Dataset: to download the validation dataset. ### Model Conversion ```bash mkdir checkpoints -python3 export_onnx.py --output_model checkpoints/vgg16.onnx +python3 export_onnx.py --origin_model /path/to/vgg16-397923af.pth --output_model checkpoints/vgg16.onnx ``` ## Inference diff --git a/models/cv/classification/vgg16/ixrt/export_onnx.py b/models/cv/classification/vgg16/ixrt/export_onnx.py index aa0b19ae125c80b7b1c9253a602d8cd8dd628184..17d8bb550f8ba8af7bcf8073682808d44d34701d 100644 --- a/models/cv/classification/vgg16/ixrt/export_onnx.py +++ b/models/cv/classification/vgg16/ixrt/export_onnx.py @@ -18,12 +18,14 @@ import argparse def parse_args(): parser = argparse.ArgumentParser() + parser.add_argument("--origin_model", type=str) parser.add_argument("--output_model", type=str) args = parser.parse_args() return args args = parse_args() -model = models.vgg16(pretrained=True) +model = models.vgg16() +model.load_state_dict(torch.load(args.origin_model)) model.cuda() model.eval() input = torch.randn(1, 3, 224, 224, device='cuda') diff --git a/models/cv/classification/vgg16/ixrt/inference.py b/models/cv/classification/vgg16/ixrt/inference.py index 1ec56b4a1f09ee4bd7516461f758ac121a5346a0..4e178df4059e16e61daf0644862f27dd5fe99fbe 100644 --- a/models/cv/classification/vgg16/ixrt/inference.py +++ b/models/cv/classification/vgg16/ixrt/inference.py @@ -84,6 +84,7 @@ def main(config): total_sample = 0 acc_top1, acc_top5 = 0, 0 + start_time = time.time() with tqdm(total= len(dataloader)) as _tqdm: for idx, (batch_data, batch_label) in enumerate(dataloader): batch_data = batch_data.numpy().astype(inputs[0]["dtype"]) @@ -105,7 +106,10 @@ def main(config): _tqdm.set_postfix(acc_1='{:.4f}'.format(acc_top1/total_sample), acc_5='{:.4f}'.format(acc_top5/total_sample)) _tqdm.update(1) + end_time = time.time() + end2end_time = end_time - start_time + print(F"E2E time : {end2end_time:.3f} seconds") print(F"Acc@1 : {acc_top1/total_sample} = {acc_top1}/{total_sample}") print(F"Acc@5 : {acc_top5/total_sample} = {acc_top5}/{total_sample}") acc1 = acc_top1/total_sample diff --git a/models/cv/classification/vgg16/ixrt/requirements.txt b/models/cv/classification/vgg16/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..24dc14b78899672d6308c24c16a8168b90577adb --- /dev/null +++ b/models/cv/classification/vgg16/ixrt/requirements.txt @@ -0,0 +1,3 @@ +tqdm +onnxsim +opencv-python==4.6.0.66 \ No newline at end of file diff --git a/models/cv/detection/yolov8/igie/README.md b/models/cv/detection/yolov8/igie/README.md index ce5ce80d45f958e2df15e464e5685363f7234a22..cb2a3594c7c7a629bca502caaa9a793ee652f76a 100644 --- a/models/cv/detection/yolov8/igie/README.md +++ b/models/cv/detection/yolov8/igie/README.md @@ -18,7 +18,7 @@ apt install -y libgl1-mesa-dev pip3 install tqdm pip3 install onnx pip3 install pycocotools -pip3 install ultralytics +pip3 install ultralytics==8.2.51 ``` ### Download diff --git a/models/cv/detection/yolov8/ixrt/README.md b/models/cv/detection/yolov8/ixrt/README.md index 96d637e0d9bfd485d169ddaf7c55d26eddeacbf3..6ed7ea5364fe0d7eeecf65d31d75ca47ecb52676 100644 --- a/models/cv/detection/yolov8/ixrt/README.md +++ b/models/cv/detection/yolov8/ixrt/README.md @@ -15,12 +15,7 @@ yum install -y mesa-libGL ## Ubuntu apt install -y libgl1-mesa-glx -pip3 install tqdm -pip3 install onnx -pip3 install onnxsim -pip3 install pycocotools -pip3 install ultralytics -pip3 install 
cuda-python +pip3 install -r requirements.txt ``` ### Download @@ -29,39 +24,40 @@ Pretrained model: to download the validation dataset. -```bash -# get yolov8n.pt -wget https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt -# set coco path -mkdir -p data/ -ln -s /Path/to/coco/ data/coco -``` - ### Model Conversion ```bash -python3 export.py --weight yolov8n.pt --batch 32 -onnxsim yolov8n.onnx ./data/yolov8n.onnx +mkdir -p checkpoints/ +mv yolov8n.pt yolov8.pt +python3 export.py --weight yolov8.pt --batch 32 +onnxsim yolov8.onnx ./checkpoints/yolov8.onnx ``` ## Inference +```bash +export PROJ_DIR=./ +export DATASETS_DIR=/path/to/coco/ +export CHECKPOINTS_DIR=./checkpoints +export RUN_DIR=./ +``` + ### FP16 ```bash # Accuracy -bash scripts/infer_yolov8n_fp16_accuracy.sh +bash scripts/infer_yolov8_fp16_accuracy.sh # Performance -bash scripts/infer_yolov8n_fp16_performance.sh +bash scripts/infer_yolov8_fp16_performance.sh ``` ### INT8 ```bash # Accuracy -bash scripts/infer_yolov8n_int8_accuracy.sh +bash scripts/infer_yolov8_int8_accuracy.sh # Performance -bash scripts/infer_yolov8n_int8_performance.sh +bash scripts/infer_yolov8_int8_performance.sh ``` ## Results diff --git a/models/cv/detection/yolov8/ixrt/requirements.txt b/models/cv/detection/yolov8/ixrt/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cc201ca4f6b252e9330de7997a4e707544f07470 --- /dev/null +++ b/models/cv/detection/yolov8/ixrt/requirements.txt @@ -0,0 +1,6 @@ +tqdm +onnx +onnxsim +pycocotools +ultralytics==8.2.51 +cuda-python \ No newline at end of file diff --git a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_fp16_accuracy.sh b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_fp16_accuracy.sh similarity index 88% rename from models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_fp16_accuracy.sh rename to models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_fp16_accuracy.sh index 44e7537657a65fc84d89531b8df9ad647513dfbe..65434fece2c57f803a2e27eadf2289bdf61cc7b3 100644 --- a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_fp16_accuracy.sh +++ b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_fp16_accuracy.sh @@ -22,27 +22,27 @@ check_status() fi } -PROJ_DIR=$(cd $(dirname $0);cd ../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/coco" +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} COCO_GT=${DATASETS_DIR}/annotations/instances_val2017.json EVAL_DIR=${DATASETS_DIR}/images/val2017 -CHECKPOINTS_DIR="${PROJ_DIR}/data" -RUN_DIR="${PROJ_DIR}" +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} +RUN_DIR=${PROJ_DIR} ORIGINE_MODEL=${CHECKPOINTS_DIR} echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} echo RUN_DIR : ${RUN_DIR} echo ====================== Model Info ====================== -echo Model Name : yolov8n +echo Model Name : yolov8 echo Onnx Path : ${ORIGINE_MODEL} BATCH_SIZE=32 -CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8n.onnx +CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8.onnx # Build Engine echo Build Engine -ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8n_fp16.engine +ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8_fp16.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else diff --git a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_fp16_performance.sh b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_fp16_performance.sh similarity index 88% rename from models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_fp16_performance.sh rename to models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_fp16_performance.sh index 
1ab3808f1f45cf2072fa41a2107fa88c17fa3610..355b27bc6f306786f31c1dee3beabc5360e2bf0d 100644 --- a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_fp16_performance.sh +++ b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_fp16_performance.sh @@ -22,27 +22,27 @@ check_status() fi } -PROJ_DIR=$(cd $(dirname $0);cd ../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/coco" +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} COCO_GT=${DATASETS_DIR}/annotations/instances_val2017.json EVAL_DIR=${DATASETS_DIR}/images/val2017 -CHECKPOINTS_DIR="${PROJ_DIR}/data" -RUN_DIR="${PROJ_DIR}" +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} +RUN_DIR=${PROJ_DIR} ORIGINE_MODEL=${CHECKPOINTS_DIR} echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} echo RUN_DIR : ${RUN_DIR} echo ====================== Model Info ====================== -echo Model Name : yolov8n +echo Model Name : yolov8 echo Onnx Path : ${ORIGINE_MODEL} BATCH_SIZE=32 -CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8n.onnx +CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8.onnx # Build Engine echo Build Engine -ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8n_fp16.engine +ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8_fp16.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else diff --git a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_int8_accuracy.sh b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_int8_accuracy.sh similarity index 90% rename from models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_int8_accuracy.sh rename to models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_int8_accuracy.sh index a2257463d70ee8fe6e9853db0fafd44f98ad8c83..bf1ecf255f6a16ee859573b1dd5d93b7d138b245 100644 --- a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_int8_accuracy.sh +++ b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_int8_accuracy.sh @@ -22,12 +22,12 @@ check_status() fi } -PROJ_DIR=$(cd $(dirname $0);cd ../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/coco" +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} COCO_GT=${DATASETS_DIR}/annotations/instances_val2017.json EVAL_DIR=${DATASETS_DIR}/images/val2017 -CHECKPOINTS_DIR="${PROJ_DIR}/data" -RUN_DIR="${PROJ_DIR}" +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} +RUN_DIR=${PROJ_DIR} ORIGINE_MODEL=${CHECKPOINTS_DIR} DISABLE_NAMES=('/model.22/Concat' '/model.22/Concat_1' '/model.22/Concat_2' '/model.22/Reshape' '/model.22/Reshape_1' '/model.22/Reshape_2' '/model.22/Concat_3' '/model.22/Split' '/model.22/dfl/Reshape' '/model.22/dfl/Transpose' '/model.22/dfl/Softmax' '/model.22/dfl/Transpose_1' '/model.22/dfl/conv/Conv' '/model.22/dfl/Reshape_1' '/model.22/Slice' '/model.22/Slice_1' '/model.22/Sub' '/model.22/Add_1' '/model.22/Add_2' '/model.22/Div_1' '/model.22/Sub_1' '/model.22/Concat_4' '/model.22/Mul_2' '/model.22/Sigmoid' '/model.22/Concat_5') @@ -35,19 +35,19 @@ echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} echo RUN_DIR : ${RUN_DIR} echo ====================== Model Info ====================== -echo Model Name : yolov8n +echo Model Name : yolov8 echo Onnx Path : ${ORIGINE_MODEL} BATCH_SIZE=32 -CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8n.onnx +CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8.onnx # quant -FINAL_MODEL=${CHECKPOINTS_DIR}/quantized_yolov8n_bs${BATCH_SIZE}.onnx +FINAL_MODEL=${CHECKPOINTS_DIR}/quantized_yolov8_bs${BATCH_SIZE}.onnx if [ -f $FINAL_MODEL ];then echo " "Quantize Skip, $FINAL_MODEL has been existed else python3 ${RUN_DIR}/quant.py \ - --model_name "YOLOV8N" \ + --model_name "YOLOV8" \ --model ${CURRENT_MODEL} \ --bsz ${BATCH_SIZE} \ 
--dataset_dir ${EVAL_DIR} \ @@ -62,7 +62,7 @@ CURRENT_MODEL=${FINAL_MODEL} # Build Engine echo Build Engine -ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8n_int8.engine +ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8_int8.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else diff --git a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_int8_performance.sh b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_int8_performance.sh similarity index 89% rename from models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_int8_performance.sh rename to models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_int8_performance.sh index f1774d5b2b28ce734dadb3e022a3359b3790f2da..f4c31b681cb2b436543cdbe568d5349e3693080d 100644 --- a/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8n_int8_performance.sh +++ b/models/cv/detection/yolov8/ixrt/scripts/infer_yolov8_int8_performance.sh @@ -22,31 +22,31 @@ check_status() fi } -PROJ_DIR=$(cd $(dirname $0);cd ../; pwd) -DATASETS_DIR="${PROJ_DIR}/data/coco" +PROJ_DIR=${PROJ_DIR} +DATASETS_DIR=${DATASETS_DIR} COCO_GT=${DATASETS_DIR}/annotations/instances_val2017.json EVAL_DIR=${DATASETS_DIR}/images/val2017 -CHECKPOINTS_DIR="${PROJ_DIR}/data" -RUN_DIR="${PROJ_DIR}" +CHECKPOINTS_DIR=${CHECKPOINTS_DIR} +RUN_DIR=${PROJ_DIR} ORIGINE_MODEL=${CHECKPOINTS_DIR} echo CHECKPOINTS_DIR : ${CHECKPOINTS_DIR} echo DATASETS_DIR : ${DATASETS_DIR} echo RUN_DIR : ${RUN_DIR} echo ====================== Model Info ====================== -echo Model Name : yolov8n +echo Model Name : yolov8 echo Onnx Path : ${ORIGINE_MODEL} BATCH_SIZE=32 -CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8n.onnx +CURRENT_MODEL=${CHECKPOINTS_DIR}/yolov8.onnx # quant -FINAL_MODEL=${CHECKPOINTS_DIR}/quantized_yolov8n_bs${BATCH_SIZE}.onnx +FINAL_MODEL=${CHECKPOINTS_DIR}/quantized_yolov8_bs${BATCH_SIZE}.onnx if [ -f $FINAL_MODEL ];then echo " "Quantize Skip, $FINAL_MODEL has been existed else python3 ${RUN_DIR}/quant.py \ - --model_name "YOLOV8N" \ + --model_name "YOLOV8" \ --model ${CURRENT_MODEL} \ --bsz ${BATCH_SIZE} \ --dataset_dir ${EVAL_DIR} \ @@ -61,7 +61,7 @@ CURRENT_MODEL=${FINAL_MODEL} # Build Engine echo Build Engine -ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8n_int8.engine +ENGINE_FILE=${CHECKPOINTS_DIR}/yolov8_int8.engine if [ -f $ENGINE_FILE ];then echo " "Build Engine Skip, $ENGINE_FILE has been existed else
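The inference.py hunks repeated across shufflenet_v1, squeezenet_v11, and vgg16 all add the same end-to-end timing measurement around the evaluation loop. A minimal sketch of that pattern in isolation, with a placeholder `run_batch` standing in for the engine execution and accuracy bookkeeping done by the real scripts:

```python
import time

def evaluate(dataloader, run_batch):
    # Same shape as the loop in inference.py: time the whole pass over the
    # validation dataloader and report it as "E2E time" once the loop ends.
    start_time = time.time()
    for batch_data, batch_label in dataloader:
        run_batch(batch_data)  # engine execution + top-1/top-5 accounting in the real script
    end2end_time = time.time() - start_time
    print(f"E2E time : {end2end_time:.3f} seconds")

if __name__ == "__main__":
    # Dummy dataloader and no-op runner, just to exercise the timing path.
    dummy_loader = [([0.0, 1.0], 0) for _ in range(3)]
    evaluate(dummy_loader, lambda batch: None)
```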
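The squeezenet_v11 CONFIG edit only renames ORIGINE_MODEL so it matches the new export filename (squeezenet_v11.onnx), mirroring the earlier densenet121 and efficientnet_b0 config updates. A small sketch, using a hypothetical helper that is not part of the repository, which parses such a shell-style CONFIG and confirms the referenced ONNX file exists under the checkpoints directory:

```python
import os

def read_shell_config(path):
    # Simplified KEY=VALUE parser for the CONFIG files used by these models;
    # comments and blank lines are skipped, quoting is not handled.
    cfg = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, _, value = line.partition("=")
            cfg[key.strip()] = value.strip()
    return cfg

if __name__ == "__main__":
    # Hypothetical paths; adjust to the actual checkout layout.
    cfg = read_shell_config("config/SQUEEZENET_V11_CONFIG")
    onnx_path = os.path.join("checkpoints", cfg.get("ORIGINE_MODEL", ""))
    status = "exists" if os.path.isfile(onnx_path) else "is missing"
    print(f"{cfg.get('MODEL_NAME')}: {onnx_path} {status}")
```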
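Each model now lists its dependencies in a per-model requirements.txt, several with pinned versions (opencv-python==4.6.0.66, mmcls==0.24.0, ultralytics==8.2.51). A hedged sketch of a pre-flight check, not part of the repository, that reports whether the simple `name` / `name==version` entries in such a file are already satisfied, using the standard-library importlib.metadata:

```python
from importlib.metadata import PackageNotFoundError, version

def check_requirements(path="requirements.txt"):
    # Handles only the plain "name" and "name==version" forms used in these
    # files; it is not a general requirements parser.
    with open(path, encoding="utf-8") as f:
        for line in f:
            req = line.strip()
            if not req or req.startswith("#"):
                continue
            name, _, wanted = req.partition("==")
            try:
                installed = version(name)
            except PackageNotFoundError:
                print(f"MISSING   {req}")
                continue
            if wanted and installed != wanted:
                print(f"MISMATCH  {name}: installed {installed}, pinned {wanted}")
            else:
                print(f"OK        {name} {installed}")

if __name__ == "__main__":
    check_requirements()
```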
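The vgg16 export_onnx.py change replaces the implicit `pretrained=True` download with an explicit `--origin_model` checkpoint, matching the README's new pretrained-model step. A self-contained sketch of that flow; it runs on CPU and uses generic export options, whereas the repository script moves the model to CUDA and may pass different ONNX export arguments:

```python
import argparse

import torch
from torchvision import models

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--origin_model", type=str, required=True,
                        help="local path to vgg16-397923af.pth")
    parser.add_argument("--output_model", type=str, required=True)
    return parser.parse_args()

def main():
    args = parse_args()
    # Build the architecture without triggering a weight download, then load
    # the locally provided state dict.
    model = models.vgg16()
    model.load_state_dict(torch.load(args.origin_model, map_location="cpu"))
    model.eval()

    dummy = torch.randn(1, 3, 224, 224)
    torch.onnx.export(model, dummy, args.output_model,
                      input_names=["input"], output_names=["output"],
                      opset_version=11)
    print(f"Exported {args.output_model}")

if __name__ == "__main__":
    main()
```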
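The yolov8/ixrt README now renames the downloaded weights to yolov8.pt, exports with `export.py --weight yolov8.pt --batch 32`, and simplifies the result into ./checkpoints/yolov8.onnx. The repository's export.py is not shown in this diff; the sketch below assumes the pinned ultralytics==8.2.51 API for the export step and uses onnx/onnxsim in place of the `onnxsim` CLI call from the README:

```python
import os

import onnx
from onnxsim import simplify
from ultralytics import YOLO

# Export yolov8.pt (the renamed yolov8n.pt) to ONNX at batch size 32.
model = YOLO("yolov8.pt")
onnx_path = model.export(format="onnx", batch=32, imgsz=640)

# Simplify into the checkpoints/ layout the refactored scripts expect.
os.makedirs("checkpoints", exist_ok=True)
simplified, ok = simplify(onnx.load(onnx_path))
assert ok, "onnxsim simplification failed"
onnx.save(simplified, "checkpoints/yolov8.onnx")
```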
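The rewritten infer_yolov8_* scripts stop deriving PROJ_DIR, DATASETS_DIR, and CHECKPOINTS_DIR from the script location and instead read the variables exported in the README's Inference section, while keeping the existing skip-if-exists behaviour for the quantized ONNX and the serialized engine. A Python sketch of that control flow, with the quant.py invocation abridged (the shell script passes further calibration flags not reproduced here) and the engine build left as a placeholder:

```python
import os
import subprocess
import sys

def getenv_or_fail(name):
    # The refactored scripts expect the caller to export these variables
    # rather than computing defaults relative to the script path.
    value = os.environ.get(name)
    if not value:
        sys.exit(f"{name} is not set; export it before running")
    return value

if __name__ == "__main__":
    proj_dir = getenv_or_fail("PROJ_DIR")
    datasets_dir = getenv_or_fail("DATASETS_DIR")
    checkpoints_dir = getenv_or_fail("CHECKPOINTS_DIR")

    batch_size = 32
    eval_dir = os.path.join(datasets_dir, "images", "val2017")
    current_model = os.path.join(checkpoints_dir, "yolov8.onnx")
    final_model = os.path.join(checkpoints_dir, f"quantized_yolov8_bs{batch_size}.onnx")
    engine_file = os.path.join(checkpoints_dir, "yolov8_int8.engine")

    if os.path.isfile(final_model):
        print(f"Quantize Skip, {final_model} already exists")
    else:
        # Abridged quant.py call; the shell script adds calibration options.
        subprocess.run([sys.executable, os.path.join(proj_dir, "quant.py"),
                        "--model_name", "YOLOV8",
                        "--model", current_model,
                        "--bsz", str(batch_size),
                        "--dataset_dir", eval_dir],
                       check=True)

    if os.path.isfile(engine_file):
        print(f"Build Engine Skip, {engine_file} already exists")
    else:
        print(f"{engine_file} would be built here (build engine step omitted)")
```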