From d54094a67d44cdb7d95ae0fb36468d925095d3a6 Mon Sep 17 00:00:00 2001 From: YoungPeng Date: Tue, 15 Apr 2025 10:52:51 +0800 Subject: [PATCH 1/5] Add: yolov12 inference script. --- .../object_detection/yolov12/igie/README.md | 59 ++++++++ .../yolov12/igie/build_engine.py | 73 +++++++++ .../yolov12/igie/ci/prepare.sh | 36 +++++ .../object_detection/yolov12/igie/export.py | 43 ++++++ .../yolov12/igie/inference.py | 140 ++++++++++++++++++ .../yolov12/igie/requirements.txt | 2 + .../scripts/infer_yolov12_fp16_accuracy.sh | 35 +++++ .../scripts/infer_yolov12_fp16_performance.sh | 36 +++++ .../yolov12/igie/validator.py | 89 +++++++++++ 9 files changed, 513 insertions(+) create mode 100644 models/cv/object_detection/yolov12/igie/README.md create mode 100644 models/cv/object_detection/yolov12/igie/build_engine.py create mode 100644 models/cv/object_detection/yolov12/igie/ci/prepare.sh create mode 100644 models/cv/object_detection/yolov12/igie/export.py create mode 100644 models/cv/object_detection/yolov12/igie/inference.py create mode 100644 models/cv/object_detection/yolov12/igie/requirements.txt create mode 100644 models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh create mode 100644 models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh create mode 100644 models/cv/object_detection/yolov12/igie/validator.py diff --git a/models/cv/object_detection/yolov12/igie/README.md b/models/cv/object_detection/yolov12/igie/README.md new file mode 100644 index 00000000..537773b9 --- /dev/null +++ b/models/cv/object_detection/yolov12/igie/README.md @@ -0,0 +1,59 @@ +# YOLOv12 (IGIE) + +## Model Description + +YOLOv12 achieves high precision and efficient real-time object detection by integrating attention mechanisms and innovative architectural design. YOLOv12-N is the lightweight version of this series, optimized for resource-constrained environments, maintaining the core advantages of YOLOv12 while offering fast inference and excellent detection accuracy. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| MR-V100 | 4.2.0 | 25.06 | + +## Model Preparation + +### Prepare Resources + +Pretrained model: + +### Install Dependencies + +```bash +pip3 install -r requirements.txt +``` + +## Model Conversion + +```bash +git clone --depth 1 https://github.com/sunsmarterjie/yolov12.git +cd yolov12 +pip3 install -e . +cd .. + +python3 export.py --weight yolov12n.pt --batch 32 +``` + +## Model Inference + +```bash +export DATASETS_DIR=/Path/to/coco/ +``` + +### FP16 + +```bash +# Accuracy +bash scripts/infer_yolov12_fp16_accuracy.sh +# Performance +bash scripts/infer_yolov12_fp16_performance.sh +``` + +## Model Results + +| Model | BatchSize | Precision | FPS | IOU@0.5 | IOU@0.5:0.95 | +| ------- | --------- | --------- | ------- | ------- | ------------ | +| YOLOv12 | 32 | FP16 | 666.641 | 0.559 | 0.403 | + +## References + +YOLOv12: diff --git a/models/cv/object_detection/yolov12/igie/build_engine.py b/models/cv/object_detection/yolov12/igie/build_engine.py new file mode 100644 index 00000000..d3626ae7 --- /dev/null +++ b/models/cv/object_detection/yolov12/igie/build_engine.py @@ -0,0 +1,73 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tvm +import argparse +from tvm import relay +from tvm.relay.import_model import import_model_to_igie + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument("--engine_path", + type=str, + required=True, + help="igie export engine path.") + + parser.add_argument("--input", + type=str, + required=True, + help=""" + input info of the model, format should be: + input_name:input_shape + eg: --input input:1,3,224,224. + """) + + parser.add_argument("--precision", + type=str, + choices=["fp32", "fp16", "int8"], + required=True, + help="model inference precision.") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + # get input valueinfo + input_name, input_shape = args.input.split(":") + shape = tuple([int(s) for s in input_shape.split(",")]) + input_dict = {input_name: shape} + + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + + mod, params = import_model_to_igie(args.model_path, input_dict, backend="igie") + + # build engine + lib = tvm.relay.build(mod, target=target, params=params, precision=args.precision) + + # export engine + lib.export_library(args.engine_path) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/object_detection/yolov12/igie/ci/prepare.sh b/models/cv/object_detection/yolov12/igie/ci/prepare.sh new file mode 100644 index 00000000..09a8d532 --- /dev/null +++ b/models/cv/object_detection/yolov12/igie/ci/prepare.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -x + +ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') +if [[ ${ID} == "ubuntu" ]]; then + apt install -y libgl1-mesa-glx +elif [[ ${ID} == "centos" ]]; then + yum install -y mesa-libGL +else + echo "Not Support Os" +fi + +pip3 install -r requirements.txt + +git clone --depth 1 https://github.com/sunsmarterjie/yolov12.git + +cd yolov12 +pip3 install -e . +cd .. 
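+
+# The pretrained weight is expected to be staged into the working directory
+# by the CI environment; fail early if it is missing (illustrative guard).
+if [[ ! -f "yolov12n.pt" ]]; then
+    echo "yolov12n.pt not found in $(pwd)" && exit 1
+fi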
+
+python3 export.py --weight yolov12n.pt --batch 32
diff --git a/models/cv/object_detection/yolov12/igie/export.py b/models/cv/object_detection/yolov12/igie/export.py
new file mode 100644
index 00000000..aec62f72
--- /dev/null
+++ b/models/cv/object_detection/yolov12/igie/export.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+from ultralytics import YOLO
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--weight",
+                        type=str,
+                        required=True,
+                        help="pytorch model weight.")
+
+    parser.add_argument("--batch",
+                        type=int,
+                        required=True,
+                        help="batchsize of the model.")
+    args = parser.parse_args()
+
+    return args
+
+def main():
+    args = parse_args()
+
+    model = YOLO(args.weight).cpu()
+
+    model.export(format='onnx', batch=args.batch, imgsz=(640, 640), optimize=True, simplify=True, opset=13)
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/models/cv/object_detection/yolov12/igie/inference.py b/models/cv/object_detection/yolov12/igie/inference.py
new file mode 100644
index 00000000..cbed1c03
--- /dev/null
+++ b/models/cv/object_detection/yolov12/igie/inference.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
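+
+# This script loads a prebuilt IGIE engine and either times raw graph
+# execution (--perf_only) or runs COCO val2017 through the ultralytics
+# validation pipeline (see validator.py) to report detection mAP.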
+ +import argparse +import os + +import tvm +from tvm import relay + +import numpy as np +from pathlib import Path +from ultralytics import YOLO +from ultralytics.cfg import get_cfg +from ultralytics.utils import DEFAULT_CFG +from validator import IGIE_Validator + + + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--engine", + type=str, + required=True, + help="igie engine path.") + + parser.add_argument("--batchsize", + type=int, + required=True, + help="inference batch size.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="datasets path.") + + parser.add_argument("--input_name", + type=str, + required=True, + help="input name of the model.") + + parser.add_argument("--warmup", + type=int, + default=3, + help="number of warmup before test.") + + parser.add_argument("--acc_target", + type=float, + default=None, + help="Model inference Accuracy target.") + + parser.add_argument("--fps_target", + type=float, + default=None, + help="Model inference FPS target.") + + parser.add_argument("--perf_only", + type=bool, + default=False, + help="Run performance test only") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + batch_size = args.batchsize + + # create iluvatar target & device + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + device = tvm.device(target.kind.name, 0) + + # load engine + lib = tvm.runtime.load_module(args.engine) + + # create runtime from engine + module = tvm.contrib.graph_executor.GraphModule(lib["default"](device)) + + # just run perf test + if args.perf_only: + ftimer = module.module.time_evaluator("run", device, number=100, repeat=1) + prof_res = np.array(ftimer().results) * 1000 + fps = batch_size * 1000 / np.mean(prof_res) + print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}") + else: + root_path = args.datasets + val_path = os.path.join(root_path, 'val2017.txt') + + overrides = {} + overrides['mode'] = 'val' + + cfg_args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + + cfg_args.batch = args.batchsize + + cfg_args.data = { + 'path': Path(root_path), + 'val': val_path, + 'names': + { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', + 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', + 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', + 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', + 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', + 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', + 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', + 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', + 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', + 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', + 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', + 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', + 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', + 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', + 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush' + }, + 'nc': 80} + cfg_args.save_json = True + + validator = IGIE_Validator(args=cfg_args, save_dir=Path('.')) + validator.stride = 32 + + stats = 
validator(module, device) + +if __name__ == "__main__": + main() diff --git a/models/cv/object_detection/yolov12/igie/requirements.txt b/models/cv/object_detection/yolov12/igie/requirements.txt new file mode 100644 index 00000000..ba0fdc99 --- /dev/null +++ b/models/cv/object_detection/yolov12/igie/requirements.txt @@ -0,0 +1,2 @@ +tqdm +onnx==1.13.0 diff --git a/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh new file mode 100644 index 00000000..d2a725f9 --- /dev/null +++ b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +batchsize=32 +model_path="yolov12n.onnx" +datasets_path=${DATASETS_DIR} + +# build engine +python3 build_engine.py \ + --model_path ${model_path} \ + --input images:${batchsize},3,640,640 \ + --precision fp16 \ + --engine_path yolov12n_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine yolov12n_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name images \ + --datasets ${datasets_path} \ No newline at end of file diff --git a/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh new file mode 100644 index 00000000..0b7d208d --- /dev/null +++ b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
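+
+# Throughput is measured with TVM's time_evaluator inside inference.py:
+# the graph is executed 100 times and FPS is derived from the mean latency
+# as batchsize * 1000 / mean_time_ms.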
+
+batchsize=32
+model_path="yolov12n.onnx"
+datasets_path=${DATASETS_DIR}
+
+# build engine
+python3 build_engine.py \
+    --model_path ${model_path} \
+    --input images:${batchsize},3,640,640 \
+    --precision fp16 \
+    --engine_path yolov12n_bs_${batchsize}_fp16.so
+
+
+# inference
+python3 inference.py \
+    --engine yolov12n_bs_${batchsize}_fp16.so \
+    --batchsize ${batchsize} \
+    --input_name images \
+    --datasets ${datasets_path} \
+    --perf_only True
\ No newline at end of file
diff --git a/models/cv/object_detection/yolov12/igie/validator.py b/models/cv/object_detection/yolov12/igie/validator.py
new file mode 100644
index 00000000..b717b5c4
--- /dev/null
+++ b/models/cv/object_detection/yolov12/igie/validator.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tvm
+import json
+import torch
+import numpy as np
+
+from tqdm import tqdm
+
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.data.utils import check_det_dataset
+from ultralytics.utils.metrics import ConfusionMatrix
+from ultralytics.data.converter import coco80_to_coco91_class
+
+class IGIE_Validator(DetectionValidator):
+    def __call__(self, engine, device):
+        self.data = self.args.data
+        self.dataloader = self.get_dataloader(self.data.get(self.args.split), self.args.batch)
+        self.init_metrics()
+
+        self.stats = {'tp': [], 'conf': [], 'pred_cls': [], 'target_cls': [], 'target_img': []}
+
+        # warm up
+        for _ in range(3):
+            engine.run()
+
+        for batch in tqdm(self.dataloader):
+            batch = self.preprocess(batch)
+
+            imgs = batch['img']
+            pad_batch = len(imgs) != self.args.batch
+            if pad_batch:
+                origin_size = len(imgs)
+                imgs = np.resize(imgs, (self.args.batch, *imgs.shape[1:]))
+
+            engine.set_input(0, tvm.nd.array(imgs, device))
+
+            engine.run()
+
+            outputs = engine.get_output(0).asnumpy()
+
+            if pad_batch:
+                outputs = outputs[:origin_size]
+
+            outputs = torch.from_numpy(outputs)
+
+            preds = self.postprocess([outputs])
+
+            self.update_metrics(preds, batch)
+
+        stats = self.get_stats()
+
+        if self.args.save_json and self.jdict:
+            with open(str(self.save_dir / 'predictions.json'), 'w') as f:
+                print(f'Saving {f.name} ...')
+                json.dump(self.jdict, f)  # flatten and save
+
+            stats = self.eval_json(stats)
+
+        return stats
+
+    def init_metrics(self):
+        """Initialize evaluation metrics for YOLO."""
+        val = self.data.get(self.args.split, '')  # validation path
+        self.is_coco = isinstance(val, str) and 'coco' in val and val.endswith(f'{os.sep}val2017.txt')  # is COCO
+        self.class_map = coco80_to_coco91_class() if self.is_coco else list(range(1000))
+        self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
+        self.names = self.data['names']
+        self.nc = len(self.names)
+        self.metrics.names = self.names
+        self.confusion_matrix = ConfusionMatrix(nc=80)
+        self.seen = 0
+        self.jdict = []
+        self.stats = []
+
-- 
Gitee

From
3cdcb6168b83e05cc0c12ffa63cd79397c7c6ddf Mon Sep 17 00:00:00 2001 From: YoungPeng Date: Tue, 15 Apr 2025 14:40:25 +0800 Subject: [PATCH 2/5] Add: cspresnext50 inference script. --- .../cspresnext50/igie/README.md | 70 +++++++ .../cspresnext50/igie/build_engine.py | 73 +++++++ .../cspresnext50/igie/ci/prepare.sh | 33 ++++ .../cspresnext50/igie/export.py | 78 ++++++++ .../cspresnext50/igie/inference.py | 185 ++++++++++++++++++ .../cspresnext50/igie/requirements.txt | 5 + .../infer_cspresnext50_fp16_accuracy.sh | 35 ++++ .../infer_cspresnext50_fp16_performance.sh | 36 ++++ 8 files changed, 515 insertions(+) create mode 100644 models/cv/classification/cspresnext50/igie/README.md create mode 100644 models/cv/classification/cspresnext50/igie/build_engine.py create mode 100644 models/cv/classification/cspresnext50/igie/ci/prepare.sh create mode 100644 models/cv/classification/cspresnext50/igie/export.py create mode 100644 models/cv/classification/cspresnext50/igie/inference.py create mode 100644 models/cv/classification/cspresnext50/igie/requirements.txt create mode 100644 models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh create mode 100644 models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh diff --git a/models/cv/classification/cspresnext50/igie/README.md b/models/cv/classification/cspresnext50/igie/README.md new file mode 100644 index 00000000..66e8490f --- /dev/null +++ b/models/cv/classification/cspresnext50/igie/README.md @@ -0,0 +1,70 @@ +# CSPResNext50 (IGIE) + +## Model Description + +CSPResNeXt50 is a convolutional neural network that combines the CSPNet and ResNeXt architectures. It enhances computational efficiency and model performance through cross-stage partial connections and grouped convolutions, making it suitable for tasks such as image classification and object detection. This model improves learning capability and inference speed without significantly increasing the number of parameters. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| MR-V100 | 4.2.0 | 25.06 | + +## Model Preparation + +### Prepare Resources + +Pretrained model: + +Dataset: to download the validation dataset. 
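+
+The accuracy script reads the dataset with `torchvision.datasets.ImageFolder`, which expects one subdirectory per class. A typical layout (directory and file names below are illustrative):
+
+```bash
+imagenet_val/
+├── n01440764/
+│   ├── ILSVRC2012_val_00000293.JPEG
+│   └── ...
+├── n01443537/
+│   └── ...
+└── ...
+```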
+ +### Install Dependencies + +```bash +# Install libGL +## CentOS +yum install -y mesa-libGL +## Ubuntu +apt install -y libgl1-mesa-glx + +pip3 install -r requirements.txt +``` + +### Model Conversion + +```bash +# git clone mmpretrain +git clone -b v0.24.0 https://github.com/open-mmlab/mmpretrain.git + +# export onnx model +python3 export.py --cfg mmpretrain/configs/cspnet/cspresnext50_8xb32_in1k.py --weight cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth --output cspresnext50.onnx + +# Use onnxsim optimize onnx model +onnxsim cspresnext50.onnx cspresnext50_opt.onnx + +``` + +## Model Inference + +```bash +export DATASETS_DIR=/Path/to/imagenet_val/ +``` + +### FP16 + +```bash +# Accuracy +bash scripts/infer_cspresnext50_fp16_accuracy.sh +# Performance +bash scripts/infer_cspresnext50_fp16_performance.sh +``` + +## Model Results + +| Model | BatchSize | Precision | FPS | Top-1(%) | Top-5(%) | +| ------------ | --------- | --------- | -------- | -------- | -------- | +| CSPResNext50 | 32 | FP16 | 1972.10 | 80.028 | 94.914 | + +## References + +- [mmpretrain](https://github.com/open-mmlab/mmpretrain) diff --git a/models/cv/classification/cspresnext50/igie/build_engine.py b/models/cv/classification/cspresnext50/igie/build_engine.py new file mode 100644 index 00000000..d3626ae7 --- /dev/null +++ b/models/cv/classification/cspresnext50/igie/build_engine.py @@ -0,0 +1,73 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tvm +import argparse +from tvm import relay +from tvm.relay.import_model import import_model_to_igie + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument("--engine_path", + type=str, + required=True, + help="igie export engine path.") + + parser.add_argument("--input", + type=str, + required=True, + help=""" + input info of the model, format should be: + input_name:input_shape + eg: --input input:1,3,224,224. 
+ """) + + parser.add_argument("--precision", + type=str, + choices=["fp32", "fp16", "int8"], + required=True, + help="model inference precision.") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + # get input valueinfo + input_name, input_shape = args.input.split(":") + shape = tuple([int(s) for s in input_shape.split(",")]) + input_dict = {input_name: shape} + + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + + mod, params = import_model_to_igie(args.model_path, input_dict, backend="igie") + + # build engine + lib = tvm.relay.build(mod, target=target, params=params, precision=args.precision) + + # export engine + lib.export_library(args.engine_path) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/classification/cspresnext50/igie/ci/prepare.sh b/models/cv/classification/cspresnext50/igie/ci/prepare.sh new file mode 100644 index 00000000..2c6f6bf1 --- /dev/null +++ b/models/cv/classification/cspresnext50/igie/ci/prepare.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -x + +ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') +if [[ ${ID} == "ubuntu" ]]; then + apt install -y libgl1-mesa-glx +elif [[ ${ID} == "centos" ]]; then + yum install -y mesa-libGL +else + echo "Not Support Os" +fi +pip3 install -r requirements.txt +unzip -q /mnt/deepspark/data/repos/mmpretrain-0.24.0.zip -d ./ +# export onnx model +python3 export.py --cfg mmpretrain/configs/cspnet/cspresnext50_8xb32_in1k.py --weight cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth --output cspresnext50.onnx + +# Use onnxsim optimize onnx model +onnxsim cspresnext50.onnx cspresnext50_opt.onnx \ No newline at end of file diff --git a/models/cv/classification/cspresnext50/igie/export.py b/models/cv/classification/cspresnext50/igie/export.py new file mode 100644 index 00000000..6eafb7b1 --- /dev/null +++ b/models/cv/classification/cspresnext50/igie/export.py @@ -0,0 +1,78 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
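+
+# The wrapper below flattens the mmcls classifier (backbone -> neck ->
+# head.fc) into a single forward pass that returns raw logits, so the
+# exported ONNX graph carries no mmcls-specific pre/post-processing.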
+import argparse + +import torch +from mmcls.apis import init_model + +class Model(torch.nn.Module): + def __init__(self, config_file, checkpoint_file): + super().__init__() + self.model = init_model(config_file, checkpoint_file, device="cpu") + + def forward(self, x): + feat = self.model.backbone(x) + feat = self.model.neck(feat[0]) + out_head = self.model.head.fc(feat) + return out_head + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--weight", + type=str, + required=True, + help="pytorch model weight.") + + parser.add_argument("--cfg", + type=str, + required=True, + help="model config file.") + + parser.add_argument("--output", + type=str, + required=True, + help="export onnx model path.") + + args = parser.parse_args() + return args + +def main(): + args = parse_args() + + config_file = args.cfg + checkpoint_file = args.weight + model = Model(config_file, checkpoint_file).eval() + + input_names = ['input'] + output_names = ['output'] + dynamic_axes = {'input': {0: '-1'}, 'output': {0: '-1'}} + dummy_input = torch.randn(1, 3, 224, 224) + + torch.onnx.export( + model, + dummy_input, + args.output, + input_names = input_names, + dynamic_axes = dynamic_axes, + output_names = output_names, + opset_version=13 + ) + + print("Export onnx model successfully! ") + +if __name__ == '__main__': + main() + diff --git a/models/cv/classification/cspresnext50/igie/inference.py b/models/cv/classification/cspresnext50/igie/inference.py new file mode 100644 index 00000000..1b0c602a --- /dev/null +++ b/models/cv/classification/cspresnext50/igie/inference.py @@ -0,0 +1,185 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
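+
+# Note on the final batch: the engine is compiled for a fixed batch size, so
+# a short last batch is padded up with np.resize (which tiles existing
+# samples) and the padded predictions are discarded before scoring.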
+ +import sys +import argparse +import tvm +import torch +import torchvision +import numpy as np +from tvm import relay +from tqdm import tqdm +from torchvision import transforms + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--engine", + type=str, + required=True, + help="igie engine path.") + + parser.add_argument("--batchsize", + type=int, + required=True, + help="inference batch size.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="datasets path.") + + parser.add_argument("--input_name", + type=str, + required=True, + help="input name of the model.") + + parser.add_argument("--warmup", + type=int, + default=3, + help="number of warmup before test.") + + parser.add_argument("--num_workers", + type=int, + default=16, + help="number of workers used in pytorch dataloader.") + + parser.add_argument("--acc_target", + type=float, + default=None, + help="Model inference Accuracy target.") + + parser.add_argument("--fps_target", + type=float, + default=None, + help="Model inference FPS target.") + + parser.add_argument("--perf_only", + type=bool, + default=False, + help="Run performance test only") + + args = parser.parse_args() + + return args + +def get_dataloader(data_path, batch_size, num_workers): + dataset = torchvision.datasets.ImageFolder( + data_path, + transforms.Compose( + [ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.PILToTensor(), + transforms.ConvertImageDtype(torch.float), + transforms.Normalize( + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225) + ) + ] + ) + ) + + dataloader = torch.utils.data.DataLoader(dataset, batch_size, num_workers=num_workers) + + return dataloader + +def get_topk_accuracy(pred, label): + if isinstance(pred, np.ndarray): + pred = torch.from_numpy(pred) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label) + + top1_acc = 0 + top5_acc = 0 + for idx in range(len(label)): + label_value = label[idx] + if label_value == torch.topk(pred[idx].float(), 1).indices.data: + top1_acc += 1 + top5_acc += 1 + + elif label_value in torch.topk(pred[idx].float(), 5).indices.data: + top5_acc += 1 + + return top1_acc, top5_acc + +def main(): + args = parse_args() + + batch_size = args.batchsize + + # create iluvatar target & device + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + device = tvm.device(target.kind.name, 0) + + # load engine + lib = tvm.runtime.load_module(args.engine) + + # create runtime from engine + module = tvm.contrib.graph_executor.GraphModule(lib["default"](device)) + + # just run perf test + if args.perf_only: + ftimer = module.module.time_evaluator("run", device, number=100, repeat=1) + prof_res = np.array(ftimer().results) * 1000 + fps = batch_size * 1000 / np.mean(prof_res) + print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}") + else: + # warm up + for _ in range(args.warmup): + module.run() + + # get dataloader + dataloader = get_dataloader(args.datasets, batch_size, args.num_workers) + + top1_acc = 0 + top5_acc = 0 + total_num = 0 + + for image, label in tqdm(dataloader): + + # pad the last batch + pad_batch = len(image) != batch_size + + if pad_batch: + origin_size = len(image) + image = np.resize(image, (batch_size, *image.shape[1:])) + + module.set_input(args.input_name, tvm.nd.array(image, device)) + + # run inference + module.run() + + pred = module.get_output(0).asnumpy() + + if pad_batch: + pred = pred[:origin_size] + + # get batch accuracy + batch_top1_acc, 
batch_top5_acc = get_topk_accuracy(pred, label)
+
+            top1_acc += batch_top1_acc
+            top5_acc += batch_top5_acc
+            total_num += len(pred)  # count only the real (unpadded) samples
+
+        result_stat = {}
+        result_stat["acc@1"] = round(top1_acc / total_num * 100.0, 3)
+        result_stat["acc@5"] = round(top5_acc / total_num * 100.0, 3)
+
+        print(f"\n* Top1 acc: {result_stat['acc@1']} %, Top5 acc: {result_stat['acc@5']} %")
+
+if __name__ == "__main__":
+    main()
diff --git a/models/cv/classification/cspresnext50/igie/requirements.txt b/models/cv/classification/cspresnext50/igie/requirements.txt
new file mode 100644
index 00000000..41c31663
--- /dev/null
+++ b/models/cv/classification/cspresnext50/igie/requirements.txt
@@ -0,0 +1,5 @@
+onnx
+tqdm
+onnxsim
+mmcv==1.5.3
+mmcls
diff --git a/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh
new file mode 100644
index 00000000..e3062666
--- /dev/null
+++ b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+batchsize=32
+model_path="cspresnext50_opt.onnx"
+datasets_path=${DATASETS_DIR}
+
+# build engine
+python3 build_engine.py \
+    --model_path ${model_path} \
+    --input input:${batchsize},3,224,224 \
+    --precision fp16 \
+    --engine_path cspresnext50_opt_bs_${batchsize}_fp16.so
+
+
+# inference
+python3 inference.py \
+    --engine cspresnext50_opt_bs_${batchsize}_fp16.so \
+    --batchsize ${batchsize} \
+    --input_name input \
+    --datasets ${datasets_path}
\ No newline at end of file
diff --git a/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh
new file mode 100644
index 00000000..38f8b10e
--- /dev/null
+++ b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +batchsize=32 +model_path="cspresnext50_opt.onnx" +datasets_path=${DATASETS_DIR} + +# build engine +python3 build_engine.py \ + --model_path ${model_path} \ + --input input:${batchsize},3,224,224 \ + --precision fp16 \ + --engine_path cspresnext50_opt_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine cspresnext50_opt_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ + --perf_only True \ No newline at end of file -- Gitee From 2c833994df17918598167c67de2914ce4e47bb3f Mon Sep 17 00:00:00 2001 From: YoungPeng Date: Tue, 15 Apr 2025 17:51:25 +0800 Subject: [PATCH 3/5] Add: twins_pcpvt inference script. --- .../classification/twins_pcpvt/igie/README.md | 70 +++++++ .../twins_pcpvt/igie/build_engine.py | 73 +++++++ .../twins_pcpvt/igie/ci/prepare.sh | 33 ++++ .../classification/twins_pcpvt/igie/export.py | 78 ++++++++ .../twins_pcpvt/igie/inference.py | 185 ++++++++++++++++++ .../twins_pcpvt/igie/requirements.txt | 5 + .../infer_twins_pcpvt_small_fp16_accuracy.sh | 35 ++++ ...nfer_twins_pcpvt_small_fp16_performance.sh | 36 ++++ 8 files changed, 515 insertions(+) create mode 100644 models/cv/classification/twins_pcpvt/igie/README.md create mode 100644 models/cv/classification/twins_pcpvt/igie/build_engine.py create mode 100644 models/cv/classification/twins_pcpvt/igie/ci/prepare.sh create mode 100644 models/cv/classification/twins_pcpvt/igie/export.py create mode 100644 models/cv/classification/twins_pcpvt/igie/inference.py create mode 100644 models/cv/classification/twins_pcpvt/igie/requirements.txt create mode 100644 models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh create mode 100644 models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh diff --git a/models/cv/classification/twins_pcpvt/igie/README.md b/models/cv/classification/twins_pcpvt/igie/README.md new file mode 100644 index 00000000..4695e41e --- /dev/null +++ b/models/cv/classification/twins_pcpvt/igie/README.md @@ -0,0 +1,70 @@ +# Twins_PCPVT (IGIE) + +## Model Description + +Twins_PCPVT Small is a lightweight vision transformer model that combines pyramid convolutions and self-attention mechanisms, designed for efficient image classification. It enhances the model's expressive capability through multi-scale feature extraction and convolutional embeddings. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| MR-V100 | 4.2.0 | 25.06 | + +## Model Preparation + +### Prepare Resources + +Pretrained model: + +Dataset: to download the validation dataset. 
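+
+As a quick sanity check of the download, the file should load as an ordinary PyTorch checkpoint dict (a sketch; the key names shown are the usual mmcls convention and may differ):
+
+```python
+import torch
+
+ckpt = torch.load("twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth", map_location="cpu")
+print(list(ckpt.keys()))  # mmcls checkpoints typically contain 'meta' and 'state_dict'
+```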
+ +### Install Dependencies + +```bash +# Install libGL +## CentOS +yum install -y mesa-libGL +## Ubuntu +apt install -y libgl1-mesa-glx + +pip3 install -r requirements.txt +``` + +### Model Conversion + +```bash +# git clone mmpretrain +git clone -b v0.24.0 https://github.com/open-mmlab/mmpretrain.git + +# export onnx model +python3 export.py --cfg mmpretrain/configs/twins/twins-pcpvt-small_8xb128_in1k.py --weight twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth --output twins_pcpvt_small.onnx + +# Use onnxsim optimize onnx model +onnxsim twins_pcpvt_small.onnx twins_pcpvt_small_opt.onnx + +``` + +## Model Inference + +```bash +export DATASETS_DIR=/Path/to/imagenet_val/ +``` + +### FP16 + +```bash +# Accuracy +bash scripts/infer_twins_pcpvt_small_fp16_accuracy.sh +# Performance +bash scripts/infer_twins_pcpvt_small_fp16_performance.sh +``` + +## Model Results + +| Model | BatchSize | Precision | FPS | Top-1(%) | Top-5(%) | +| ------------ | --------- | --------- | -------- | -------- | -------- | +| Twins_PCPVT | 32 | FP16 | 1552.92 | 80.93 | 95.633 | + +## References + +- [mmpretrain](https://github.com/open-mmlab/mmpretrain) diff --git a/models/cv/classification/twins_pcpvt/igie/build_engine.py b/models/cv/classification/twins_pcpvt/igie/build_engine.py new file mode 100644 index 00000000..d3626ae7 --- /dev/null +++ b/models/cv/classification/twins_pcpvt/igie/build_engine.py @@ -0,0 +1,73 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tvm +import argparse +from tvm import relay +from tvm.relay.import_model import import_model_to_igie + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument("--engine_path", + type=str, + required=True, + help="igie export engine path.") + + parser.add_argument("--input", + type=str, + required=True, + help=""" + input info of the model, format should be: + input_name:input_shape + eg: --input input:1,3,224,224. 
+ """) + + parser.add_argument("--precision", + type=str, + choices=["fp32", "fp16", "int8"], + required=True, + help="model inference precision.") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + # get input valueinfo + input_name, input_shape = args.input.split(":") + shape = tuple([int(s) for s in input_shape.split(",")]) + input_dict = {input_name: shape} + + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + + mod, params = import_model_to_igie(args.model_path, input_dict, backend="igie") + + # build engine + lib = tvm.relay.build(mod, target=target, params=params, precision=args.precision) + + # export engine + lib.export_library(args.engine_path) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh b/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh new file mode 100644 index 00000000..cd65999b --- /dev/null +++ b/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -x + +ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') +if [[ ${ID} == "ubuntu" ]]; then + apt install -y libgl1-mesa-glx +elif [[ ${ID} == "centos" ]]; then + yum install -y mesa-libGL +else + echo "Not Support Os" +fi +pip3 install -r requirements.txt +unzip -q /mnt/deepspark/data/repos/mmpretrain-0.24.0.zip -d ./ +# export onnx model +python3 export.py --cfg mmpretrain/configs/twins/twins-pcpvt-small_8xb128_in1k.py --weight twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth --output twins_pcpvt_small.onnx + +# Use onnxsim optimize onnx model +onnxsim twins_pcpvt_small.onnx twins_pcpvt_small_opt.onnx diff --git a/models/cv/classification/twins_pcpvt/igie/export.py b/models/cv/classification/twins_pcpvt/igie/export.py new file mode 100644 index 00000000..6eafb7b1 --- /dev/null +++ b/models/cv/classification/twins_pcpvt/igie/export.py @@ -0,0 +1,78 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import argparse + +import torch +from mmcls.apis import init_model + +class Model(torch.nn.Module): + def __init__(self, config_file, checkpoint_file): + super().__init__() + self.model = init_model(config_file, checkpoint_file, device="cpu") + + def forward(self, x): + feat = self.model.backbone(x) + feat = self.model.neck(feat[0]) + out_head = self.model.head.fc(feat) + return out_head + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--weight", + type=str, + required=True, + help="pytorch model weight.") + + parser.add_argument("--cfg", + type=str, + required=True, + help="model config file.") + + parser.add_argument("--output", + type=str, + required=True, + help="export onnx model path.") + + args = parser.parse_args() + return args + +def main(): + args = parse_args() + + config_file = args.cfg + checkpoint_file = args.weight + model = Model(config_file, checkpoint_file).eval() + + input_names = ['input'] + output_names = ['output'] + dynamic_axes = {'input': {0: '-1'}, 'output': {0: '-1'}} + dummy_input = torch.randn(1, 3, 224, 224) + + torch.onnx.export( + model, + dummy_input, + args.output, + input_names = input_names, + dynamic_axes = dynamic_axes, + output_names = output_names, + opset_version=13 + ) + + print("Export onnx model successfully! ") + +if __name__ == '__main__': + main() + diff --git a/models/cv/classification/twins_pcpvt/igie/inference.py b/models/cv/classification/twins_pcpvt/igie/inference.py new file mode 100644 index 00000000..1b0c602a --- /dev/null +++ b/models/cv/classification/twins_pcpvt/igie/inference.py @@ -0,0 +1,185 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys +import argparse +import tvm +import torch +import torchvision +import numpy as np +from tvm import relay +from tqdm import tqdm +from torchvision import transforms + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--engine", + type=str, + required=True, + help="igie engine path.") + + parser.add_argument("--batchsize", + type=int, + required=True, + help="inference batch size.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="datasets path.") + + parser.add_argument("--input_name", + type=str, + required=True, + help="input name of the model.") + + parser.add_argument("--warmup", + type=int, + default=3, + help="number of warmup before test.") + + parser.add_argument("--num_workers", + type=int, + default=16, + help="number of workers used in pytorch dataloader.") + + parser.add_argument("--acc_target", + type=float, + default=None, + help="Model inference Accuracy target.") + + parser.add_argument("--fps_target", + type=float, + default=None, + help="Model inference FPS target.") + + parser.add_argument("--perf_only", + type=bool, + default=False, + help="Run performance test only") + + args = parser.parse_args() + + return args + +def get_dataloader(data_path, batch_size, num_workers): + dataset = torchvision.datasets.ImageFolder( + data_path, + transforms.Compose( + [ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.PILToTensor(), + transforms.ConvertImageDtype(torch.float), + transforms.Normalize( + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225) + ) + ] + ) + ) + + dataloader = torch.utils.data.DataLoader(dataset, batch_size, num_workers=num_workers) + + return dataloader + +def get_topk_accuracy(pred, label): + if isinstance(pred, np.ndarray): + pred = torch.from_numpy(pred) + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label) + + top1_acc = 0 + top5_acc = 0 + for idx in range(len(label)): + label_value = label[idx] + if label_value == torch.topk(pred[idx].float(), 1).indices.data: + top1_acc += 1 + top5_acc += 1 + + elif label_value in torch.topk(pred[idx].float(), 5).indices.data: + top5_acc += 1 + + return top1_acc, top5_acc + +def main(): + args = parse_args() + + batch_size = args.batchsize + + # create iluvatar target & device + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + device = tvm.device(target.kind.name, 0) + + # load engine + lib = tvm.runtime.load_module(args.engine) + + # create runtime from engine + module = tvm.contrib.graph_executor.GraphModule(lib["default"](device)) + + # just run perf test + if args.perf_only: + ftimer = module.module.time_evaluator("run", device, number=100, repeat=1) + prof_res = np.array(ftimer().results) * 1000 + fps = batch_size * 1000 / np.mean(prof_res) + print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}") + else: + # warm up + for _ in range(args.warmup): + module.run() + + # get dataloader + dataloader = get_dataloader(args.datasets, batch_size, args.num_workers) + + top1_acc = 0 + top5_acc = 0 + total_num = 0 + + for image, label in tqdm(dataloader): + + # pad the last batch + pad_batch = len(image) != batch_size + + if pad_batch: + origin_size = len(image) + image = np.resize(image, (batch_size, *image.shape[1:])) + + module.set_input(args.input_name, tvm.nd.array(image, device)) + + # run inference + module.run() + + pred = module.get_output(0).asnumpy() + + if pad_batch: + pred = pred[:origin_size] + + # get batch accuracy + batch_top1_acc, 
batch_top5_acc = get_topk_accuracy(pred, label)
+
+            top1_acc += batch_top1_acc
+            top5_acc += batch_top5_acc
+            total_num += len(pred)  # count only the real (unpadded) samples
+
+        result_stat = {}
+        result_stat["acc@1"] = round(top1_acc / total_num * 100.0, 3)
+        result_stat["acc@5"] = round(top5_acc / total_num * 100.0, 3)
+
+        print(f"\n* Top1 acc: {result_stat['acc@1']} %, Top5 acc: {result_stat['acc@5']} %")
+
+if __name__ == "__main__":
+    main()
diff --git a/models/cv/classification/twins_pcpvt/igie/requirements.txt b/models/cv/classification/twins_pcpvt/igie/requirements.txt
new file mode 100644
index 00000000..41c31663
--- /dev/null
+++ b/models/cv/classification/twins_pcpvt/igie/requirements.txt
@@ -0,0 +1,5 @@
+onnx
+tqdm
+onnxsim
+mmcv==1.5.3
+mmcls
diff --git a/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh
new file mode 100644
index 00000000..7a35a31e
--- /dev/null
+++ b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+batchsize=32
+model_path="twins_pcpvt_small_opt.onnx"
+datasets_path=${DATASETS_DIR}
+
+# build engine
+python3 build_engine.py \
+    --model_path ${model_path} \
+    --input input:${batchsize},3,224,224 \
+    --precision fp16 \
+    --engine_path twins_pcpvt_small_opt_bs_${batchsize}_fp16.so
+
+
+# inference
+python3 inference.py \
+    --engine twins_pcpvt_small_opt_bs_${batchsize}_fp16.so \
+    --batchsize ${batchsize} \
+    --input_name input \
+    --datasets ${datasets_path}
\ No newline at end of file
diff --git a/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh
new file mode 100644
index 00000000..126e7c78
--- /dev/null
+++ b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +batchsize=32 +model_path="twins_pcpvt_small_opt.onnx" +datasets_path=${DATASETS_DIR} + +# build engine +python3 build_engine.py \ + --model_path ${model_path} \ + --input input:${batchsize},3,224,224 \ + --precision fp16 \ + --engine_path twins_pcpvt_small_opt_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine twins_pcpvt_small_opt_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ + --perf_only True \ No newline at end of file -- Gitee From 7f0fb2fe06065b2e046feb43c98a9c9a831b5062 Mon Sep 17 00:00:00 2001 From: YoungPeng Date: Wed, 16 Apr 2025 10:36:09 +0800 Subject: [PATCH 4/5] Add: van_b0 inference script. --- .../cv/classification/van_b0/igie/README.md | 67 +++++++ .../van_b0/igie/build_engine.py | 73 +++++++ .../classification/van_b0/igie/ci/prepare.sh | 33 ++++ .../cv/classification/van_b0/igie/export.py | 78 ++++++++ .../classification/van_b0/igie/inference.py | 185 ++++++++++++++++++ .../van_b0/igie/requirements.txt | 4 + .../scripts/infer_van_b0_fp16_accuracy.sh | 35 ++++ .../scripts/infer_van_b0_fp16_performance.sh | 36 ++++ 8 files changed, 511 insertions(+) create mode 100644 models/cv/classification/van_b0/igie/README.md create mode 100644 models/cv/classification/van_b0/igie/build_engine.py create mode 100644 models/cv/classification/van_b0/igie/ci/prepare.sh create mode 100644 models/cv/classification/van_b0/igie/export.py create mode 100644 models/cv/classification/van_b0/igie/inference.py create mode 100644 models/cv/classification/van_b0/igie/requirements.txt create mode 100644 models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh create mode 100644 models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh diff --git a/models/cv/classification/van_b0/igie/README.md b/models/cv/classification/van_b0/igie/README.md new file mode 100644 index 00000000..985b564b --- /dev/null +++ b/models/cv/classification/van_b0/igie/README.md @@ -0,0 +1,67 @@ +# VAN_B0 (IGIE) + +## Model Description + +VAN-B0 is a lightweight visual attention network that combines convolution and attention mechanisms to enhance image classification performance. It achieves efficient feature capture by focusing on key areas and multi-scale feature extraction, making it suitable for running on resource-constrained devices. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| MR-V100 | 4.2.0 | 25.06 | + +## Model Preparation + +### Prepare Resources + +Pretrained model: + +Dataset: to download the validation dataset. 
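+
+Before running inference, it is worth confirming the validation set is complete; the ILSVRC2012 val split contains 50,000 images (the path below is a placeholder):
+
+```bash
+find /Path/to/imagenet_val/ -iname "*.JPEG" | wc -l  # expect 50000
+```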
+ +### Install Dependencies + +```bash +# Install libGL +## CentOS +yum install -y mesa-libGL +## Ubuntu +apt install -y libgl1-mesa-glx + +pip3 install -r requirements.txt +``` + +### Model Conversion + +```bash +# git clone mmpretrain +git clone -b v0.24.0 https://github.com/open-mmlab/mmpretrain.git + +# export onnx model +python3 export.py --cfg mmpretrain/configs/van/van-b0_8xb128_in1k.py --weight van-tiny_8xb128_in1k_20220501-385941af.pth --output van_b0.onnx + +``` + +## Model Inference + +```bash +export DATASETS_DIR=/Path/to/imagenet_val/ +``` + +### FP16 + +```bash +# Accuracy +bash scripts/infer_van_b0_fp16_accuracy.sh +# Performance +bash scripts/infer_van_b0_fp16_performance.sh +``` + +## Model Results + +| Model | BatchSize | Precision | FPS | Top-1(%) | Top-5(%) | +| ---------- | --------- | --------- | -------- | -------- | -------- | +| VAN_B0 | 32 | FP16 | 2155.35 | 72.079 | 91.209 | + +## References + +- [mmpretrain](https://github.com/open-mmlab/mmpretrain) diff --git a/models/cv/classification/van_b0/igie/build_engine.py b/models/cv/classification/van_b0/igie/build_engine.py new file mode 100644 index 00000000..d3626ae7 --- /dev/null +++ b/models/cv/classification/van_b0/igie/build_engine.py @@ -0,0 +1,73 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tvm +import argparse +from tvm import relay +from tvm.relay.import_model import import_model_to_igie + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument("--engine_path", + type=str, + required=True, + help="igie export engine path.") + + parser.add_argument("--input", + type=str, + required=True, + help=""" + input info of the model, format should be: + input_name:input_shape + eg: --input input:1,3,224,224. 
+                        """)
+
+    parser.add_argument("--precision",
+                        type=str,
+                        choices=["fp32", "fp16", "int8"],
+                        required=True,
+                        help="model inference precision.")
+
+    args = parser.parse_args()
+
+    return args
+
+def main():
+    args = parse_args()
+
+    # get input name and shape
+    input_name, input_shape = args.input.split(":")
+    shape = tuple([int(s) for s in input_shape.split(",")])
+    input_dict = {input_name: shape}
+
+    target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer")
+
+    mod, params = import_model_to_igie(args.model_path, input_dict, backend="igie")
+
+    # build engine
+    lib = tvm.relay.build(mod, target=target, params=params, precision=args.precision)
+
+    # export engine
+    lib.export_library(args.engine_path)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/models/cv/classification/van_b0/igie/ci/prepare.sh b/models/cv/classification/van_b0/igie/ci/prepare.sh
new file mode 100644
index 00000000..77832a05
--- /dev/null
+++ b/models/cv/classification/van_b0/igie/ci/prepare.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
+if [[ ${ID} == "ubuntu" ]]; then
+    apt install -y libgl1-mesa-glx
+elif [[ ${ID} == "centos" ]]; then
+    yum install -y mesa-libGL
+else
+    echo "Unsupported OS"
+fi
+pip3 install -r requirements.txt
+unzip -q /mnt/deepspark/data/repos/mmpretrain-0.24.0.zip -d ./
+# export onnx model
+python3 export.py --cfg mmpretrain/configs/van/van-b0_8xb128_in1k.py --weight van-tiny_8xb128_in1k_20220501-385941af.pth --output van_b0.onnx
+
+# Use onnxsim optimize onnx model
+onnxsim van_b0.onnx van_b0_opt.onnx
\ No newline at end of file
diff --git a/models/cv/classification/van_b0/igie/export.py b/models/cv/classification/van_b0/igie/export.py
new file mode 100644
index 00000000..6eafb7b1
--- /dev/null
+++ b/models/cv/classification/van_b0/igie/export.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
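+
+# export.py wraps the mmcls checkpoint (backbone -> neck -> head.fc) in a
+# plain torch.nn.Module so the classification logits can be exported to ONNX
+# with a dynamic batch dimension (see main() below).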
+import argparse
+
+import torch
+from mmcls.apis import init_model
+
+class Model(torch.nn.Module):
+    def __init__(self, config_file, checkpoint_file):
+        super().__init__()
+        self.model = init_model(config_file, checkpoint_file, device="cpu")
+
+    def forward(self, x):
+        feat = self.model.backbone(x)
+        feat = self.model.neck(feat[0])
+        out_head = self.model.head.fc(feat)
+        return out_head
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--weight",
+                        type=str,
+                        required=True,
+                        help="pytorch model weight.")
+
+    parser.add_argument("--cfg",
+                        type=str,
+                        required=True,
+                        help="model config file.")
+
+    parser.add_argument("--output",
+                        type=str,
+                        required=True,
+                        help="export onnx model path.")
+
+    args = parser.parse_args()
+    return args
+
+def main():
+    args = parse_args()
+
+    config_file = args.cfg
+    checkpoint_file = args.weight
+    model = Model(config_file, checkpoint_file).eval()
+
+    input_names = ['input']
+    output_names = ['output']
+    dynamic_axes = {'input': {0: '-1'}, 'output': {0: '-1'}}
+    dummy_input = torch.randn(1, 3, 224, 224)
+
+    torch.onnx.export(
+        model,
+        dummy_input,
+        args.output,
+        input_names = input_names,
+        dynamic_axes = dynamic_axes,
+        output_names = output_names,
+        opset_version=13
+    )
+
+    print("Exported ONNX model successfully!")
+
+if __name__ == '__main__':
+    main()
+
diff --git a/models/cv/classification/van_b0/igie/inference.py b/models/cv/classification/van_b0/igie/inference.py
new file mode 100644
index 00000000..1b0c602a
--- /dev/null
+++ b/models/cv/classification/van_b0/igie/inference.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
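+
+# inference.py loads a compiled IGIE engine and either benchmarks raw
+# throughput (--perf_only True) or walks the ImageNet validation loader
+# and reports Top-1/Top-5 accuracy.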
+
+import sys
+import argparse
+import tvm
+import torch
+import torchvision
+import numpy as np
+from tvm import relay
+from tqdm import tqdm
+from torchvision import transforms
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--engine",
+                        type=str,
+                        required=True,
+                        help="igie engine path.")
+
+    parser.add_argument("--batchsize",
+                        type=int,
+                        required=True,
+                        help="inference batch size.")
+
+    parser.add_argument("--datasets",
+                        type=str,
+                        required=True,
+                        help="datasets path.")
+
+    parser.add_argument("--input_name",
+                        type=str,
+                        required=True,
+                        help="input name of the model.")
+
+    parser.add_argument("--warmup",
+                        type=int,
+                        default=3,
+                        help="number of warmup before test.")
+
+    parser.add_argument("--num_workers",
+                        type=int,
+                        default=16,
+                        help="number of workers used in pytorch dataloader.")
+
+    parser.add_argument("--acc_target",
+                        type=float,
+                        default=None,
+                        help="Model inference Accuracy target.")
+
+    parser.add_argument("--fps_target",
+                        type=float,
+                        default=None,
+                        help="Model inference FPS target.")
+
+    # argparse's type=bool treats any non-empty string (including "False") as
+    # True, so parse the value explicitly to keep "--perf_only True" honest.
+    parser.add_argument("--perf_only",
+                        type=lambda x: str(x).lower() == "true",
+                        default=False,
+                        help="Run performance test only")
+
+    args = parser.parse_args()
+
+    return args
+
+def get_dataloader(data_path, batch_size, num_workers):
+    dataset = torchvision.datasets.ImageFolder(
+        data_path,
+        transforms.Compose(
+            [
+                transforms.Resize(256),
+                transforms.CenterCrop(224),
+                transforms.PILToTensor(),
+                transforms.ConvertImageDtype(torch.float),
+                transforms.Normalize(
+                    mean=(0.485, 0.456, 0.406),
+                    std=(0.229, 0.224, 0.225)
+                )
+            ]
+        )
+    )
+
+    dataloader = torch.utils.data.DataLoader(dataset, batch_size, num_workers=num_workers)
+
+    return dataloader
+
+def get_topk_accuracy(pred, label):
+    if isinstance(pred, np.ndarray):
+        pred = torch.from_numpy(pred)
+
+    if isinstance(label, np.ndarray):
+        label = torch.from_numpy(label)
+
+    top1_acc = 0
+    top5_acc = 0
+    for idx in range(len(label)):
+        label_value = label[idx]
+        if label_value == torch.topk(pred[idx].float(), 1).indices.data:
+            top1_acc += 1
+            top5_acc += 1
+
+        elif label_value in torch.topk(pred[idx].float(), 5).indices.data:
+            top5_acc += 1
+
+    return top1_acc, top5_acc
+
+def main():
+    args = parse_args()
+
+    batch_size = args.batchsize
+
+    # create iluvatar target & device
+    target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer")
+    device = tvm.device(target.kind.name, 0)
+
+    # load engine
+    lib = tvm.runtime.load_module(args.engine)
+
+    # create runtime from engine
+    module = tvm.contrib.graph_executor.GraphModule(lib["default"](device))
+
+    # just run perf test
+    if args.perf_only:
+        ftimer = module.module.time_evaluator("run", device, number=100, repeat=1)
+        prof_res = np.array(ftimer().results) * 1000
+        fps = batch_size * 1000 / np.mean(prof_res)
+        print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}")
+    else:
+        # warm up
+        for _ in range(args.warmup):
+            module.run()
+
+        # get dataloader
+        dataloader = get_dataloader(args.datasets, batch_size, args.num_workers)
+
+        top1_acc = 0
+        top5_acc = 0
+        total_num = 0
+
+        for image, label in tqdm(dataloader):
+
+            # pad the last batch
+            pad_batch = len(image) != batch_size
+
+            if pad_batch:
+                origin_size = len(image)
+                image = np.resize(image, (batch_size, *image.shape[1:]))
+
+            module.set_input(args.input_name, tvm.nd.array(image, device))
+
+            # run inference
+            module.run()
+
+            pred = module.get_output(0).asnumpy()
+
+            if pad_batch:
+                pred = pred[:origin_size]
+
+            # get batch accuracy
+            batch_top1_acc, batch_top5_acc = get_topk_accuracy(pred, label)
+
+            top1_acc += batch_top1_acc
+            top5_acc += batch_top5_acc
+            # count only the real samples; the padded last batch holds fewer
+            total_num += len(label)
+
+        result_stat = {}
+        result_stat["acc@1"] = round(top1_acc / total_num * 100.0, 3)
+        result_stat["acc@5"] = round(top5_acc / total_num * 100.0, 3)
+
+        print(f"\n* Top1 acc: {result_stat['acc@1']} %, Top5 acc: {result_stat['acc@5']} %")
+
+if __name__ == "__main__":
+    main()
diff --git a/models/cv/classification/van_b0/igie/requirements.txt b/models/cv/classification/van_b0/igie/requirements.txt
new file mode 100644
index 00000000..4d5ea05f
--- /dev/null
+++ b/models/cv/classification/van_b0/igie/requirements.txt
@@ -0,0 +1,4 @@
+onnx
+tqdm
+mmcv==1.5.3
+mmcls
diff --git a/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh
new file mode 100644
index 00000000..0ceb58b3
--- /dev/null
+++ b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+batchsize=32
+model_path="van_b0.onnx"
+datasets_path=${DATASETS_DIR}
+
+# build engine
+python3 build_engine.py \
+    --model_path ${model_path} \
+    --input input:${batchsize},3,224,224 \
+    --precision fp16 \
+    --engine_path van_b0_bs_${batchsize}_fp16.so
+
+
+# inference
+python3 inference.py \
+    --engine van_b0_bs_${batchsize}_fp16.so \
+    --batchsize ${batchsize} \
+    --input_name input \
+    --datasets ${datasets_path}
\ No newline at end of file
diff --git a/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh
new file mode 100644
index 00000000..a3464da0
--- /dev/null
+++ b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
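+
+# Builds the batch-32 FP16 engine and runs inference.py in throughput-only
+# mode (--perf_only True); use infer_van_b0_fp16_accuracy.sh for Top-1/Top-5.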
+
+batchsize=32
+model_path="van_b0.onnx"
+datasets_path=${DATASETS_DIR}
+
+# build engine
+python3 build_engine.py \
+    --model_path ${model_path} \
+    --input input:${batchsize},3,224,224 \
+    --precision fp16 \
+    --engine_path van_b0_bs_${batchsize}_fp16.so
+
+
+# inference
+python3 inference.py \
+    --engine van_b0_bs_${batchsize}_fp16.so \
+    --batchsize ${batchsize} \
+    --input_name input \
+    --datasets ${datasets_path} \
+    --perf_only True
\ No newline at end of file
-- 
Gitee

From d409f324d4576cae2623c419a735fcd26c3843ec Mon Sep 17 00:00:00 2001
From: "hongliang.yuan" 
Date: Wed, 16 Apr 2025 14:26:47 +0800
Subject: [PATCH 5/5] fix ci error: 1. update copyright year to 2025 2. rename
 twins_pcpvt scripts to match the model name 3. fix yolov12 spelling error

---
 models/cv/classification/cspresnext50/igie/build_engine.py | 2 +-
 models/cv/classification/cspresnext50/igie/ci/prepare.sh   | 2 +-
 models/cv/classification/cspresnext50/igie/export.py       | 2 +-
 models/cv/classification/cspresnext50/igie/inference.py    | 2 +-
 .../igie/scripts/infer_cspresnext50_fp16_accuracy.sh       | 2 +-
 .../igie/scripts/infer_cspresnext50_fp16_performance.sh    | 2 +-
 models/cv/classification/twins_pcpvt/igie/README.md        | 4 ++--
 models/cv/classification/twins_pcpvt/igie/build_engine.py  | 2 +-
 models/cv/classification/twins_pcpvt/igie/ci/prepare.sh    | 2 +-
 models/cv/classification/twins_pcpvt/igie/export.py        | 2 +-
 models/cv/classification/twins_pcpvt/igie/inference.py     | 2 +-
 ...fp16_accuracy.sh => infer_twins_pcpvt_fp16_accuracy.sh} | 2 +-
 ...erformance.sh => infer_twins_pcpvt_fp16_performance.sh} | 2 +-
 models/cv/classification/van_b0/igie/build_engine.py       | 2 +-
 models/cv/classification/van_b0/igie/ci/prepare.sh         | 7 ++-----
 models/cv/classification/van_b0/igie/export.py             | 2 +-
 models/cv/classification/van_b0/igie/inference.py          | 2 +-
 .../van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh      | 2 +-
 .../van_b0/igie/scripts/infer_van_b0_fp16_performance.sh   | 2 +-
 models/cv/object_detection/yolov12/igie/build_engine.py    | 2 +-
 models/cv/object_detection/yolov12/igie/ci/prepare.sh      | 4 ++--
 models/cv/object_detection/yolov12/igie/export.py          | 2 +-
 models/cv/object_detection/yolov12/igie/inference.py       | 2 +-
 .../yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh    | 2 +-
 .../yolov12/igie/scripts/infer_yolov12_fp16_performance.sh | 2 +-
 models/cv/object_detection/yolov12/igie/validator.py       | 2 +-
 26 files changed, 29 insertions(+), 32 deletions(-)
 rename models/cv/classification/twins_pcpvt/igie/scripts/{infer_twins_pcpvt_small_fp16_accuracy.sh => infer_twins_pcpvt_fp16_accuracy.sh} (95%)
 rename models/cv/classification/twins_pcpvt/igie/scripts/{infer_twins_pcpvt_small_fp16_performance.sh => infer_twins_pcpvt_fp16_performance.sh} (95%)

diff --git a/models/cv/classification/cspresnext50/igie/build_engine.py b/models/cv/classification/cspresnext50/igie/build_engine.py
index d3626ae7..54aa8847 100644
--- a/models/cv/classification/cspresnext50/igie/build_engine.py
+++ b/models/cv/classification/cspresnext50/igie/build_engine.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
 # All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/cspresnext50/igie/ci/prepare.sh b/models/cv/classification/cspresnext50/igie/ci/prepare.sh index 2c6f6bf1..5f18b379 100644 --- a/models/cv/classification/cspresnext50/igie/ci/prepare.sh +++ b/models/cv/classification/cspresnext50/igie/ci/prepare.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/cspresnext50/igie/export.py b/models/cv/classification/cspresnext50/igie/export.py index 6eafb7b1..c7681b68 100644 --- a/models/cv/classification/cspresnext50/igie/export.py +++ b/models/cv/classification/cspresnext50/igie/export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/cspresnext50/igie/inference.py b/models/cv/classification/cspresnext50/igie/inference.py index 1b0c602a..b0a8ed03 100644 --- a/models/cv/classification/cspresnext50/igie/inference.py +++ b/models/cv/classification/cspresnext50/igie/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh index e3062666..7086d1e1 100644 --- a/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh +++ b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh index 38f8b10e..1716d61c 100644 --- a/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh +++ b/models/cv/classification/cspresnext50/igie/scripts/infer_cspresnext50_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/twins_pcpvt/igie/README.md b/models/cv/classification/twins_pcpvt/igie/README.md index 4695e41e..f89691d0 100644 --- a/models/cv/classification/twins_pcpvt/igie/README.md +++ b/models/cv/classification/twins_pcpvt/igie/README.md @@ -54,9 +54,9 @@ export DATASETS_DIR=/Path/to/imagenet_val/ ```bash # Accuracy -bash scripts/infer_twins_pcpvt_small_fp16_accuracy.sh +bash scripts/infer_twins_pcpvt_fp16_accuracy.sh # Performance -bash scripts/infer_twins_pcpvt_small_fp16_performance.sh +bash scripts/infer_twins_pcpvt_fp16_performance.sh ``` ## Model Results diff --git a/models/cv/classification/twins_pcpvt/igie/build_engine.py b/models/cv/classification/twins_pcpvt/igie/build_engine.py index d3626ae7..54aa8847 100644 --- a/models/cv/classification/twins_pcpvt/igie/build_engine.py +++ b/models/cv/classification/twins_pcpvt/igie/build_engine.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh b/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh index cd65999b..b377a631 100644 --- a/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh +++ b/models/cv/classification/twins_pcpvt/igie/ci/prepare.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/twins_pcpvt/igie/export.py b/models/cv/classification/twins_pcpvt/igie/export.py index 6eafb7b1..c7681b68 100644 --- a/models/cv/classification/twins_pcpvt/igie/export.py +++ b/models/cv/classification/twins_pcpvt/igie/export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/twins_pcpvt/igie/inference.py b/models/cv/classification/twins_pcpvt/igie/inference.py index 1b0c602a..b0a8ed03 100644 --- a/models/cv/classification/twins_pcpvt/igie/inference.py +++ b/models/cv/classification/twins_pcpvt/igie/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_fp16_accuracy.sh similarity index 95% rename from models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh rename to models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_fp16_accuracy.sh index 7a35a31e..3d372907 100644 --- a/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_accuracy.sh +++ b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_fp16_performance.sh similarity index 95% rename from models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh rename to models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_fp16_performance.sh index 126e7c78..f7e32f1a 100644 --- a/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_small_fp16_performance.sh +++ b/models/cv/classification/twins_pcpvt/igie/scripts/infer_twins_pcpvt_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/van_b0/igie/build_engine.py b/models/cv/classification/van_b0/igie/build_engine.py index d3626ae7..54aa8847 100644 --- a/models/cv/classification/van_b0/igie/build_engine.py +++ b/models/cv/classification/van_b0/igie/build_engine.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/van_b0/igie/ci/prepare.sh b/models/cv/classification/van_b0/igie/ci/prepare.sh index 77832a05..076ffcbe 100644 --- a/models/cv/classification/van_b0/igie/ci/prepare.sh +++ b/models/cv/classification/van_b0/igie/ci/prepare.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -27,7 +27,4 @@ fi pip3 install -r requirements.txt unzip -q /mnt/deepspark/data/repos/mmpretrain-0.24.0.zip -d ./ # export onnx model -python3 export.py --cfg mmpretrain/configs/van/van-b0_8xb128_in1k.py --weight van-tiny_8xb128_in1k_20220501-385941af.pth --output van_b0.onnx - -# Use onnxsim optimize onnx model -onnxsim van_b0.onnx van_b0_opt.onnx \ No newline at end of file +python3 export.py --cfg mmpretrain/configs/van/van-b0_8xb128_in1k.py --weight van-tiny_8xb128_in1k_20220501-385941af.pth --output van_b0.onnx \ No newline at end of file diff --git a/models/cv/classification/van_b0/igie/export.py b/models/cv/classification/van_b0/igie/export.py index 6eafb7b1..c7681b68 100644 --- a/models/cv/classification/van_b0/igie/export.py +++ b/models/cv/classification/van_b0/igie/export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/van_b0/igie/inference.py b/models/cv/classification/van_b0/igie/inference.py index 1b0c602a..b0a8ed03 100644 --- a/models/cv/classification/van_b0/igie/inference.py +++ b/models/cv/classification/van_b0/igie/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh index 0ceb58b3..9b993277 100644 --- a/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh +++ b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh index a3464da0..8aa8caef 100644 --- a/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh +++ b/models/cv/classification/van_b0/igie/scripts/infer_van_b0_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov12/igie/build_engine.py b/models/cv/object_detection/yolov12/igie/build_engine.py index d3626ae7..54aa8847 100644 --- a/models/cv/object_detection/yolov12/igie/build_engine.py +++ b/models/cv/object_detection/yolov12/igie/build_engine.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov12/igie/ci/prepare.sh b/models/cv/object_detection/yolov12/igie/ci/prepare.sh index 09a8d532..44a36b9a 100644 --- a/models/cv/object_detection/yolov12/igie/ci/prepare.sh +++ b/models/cv/object_detection/yolov12/igie/ci/prepare.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -33,4 +33,4 @@ cd yolov12 pip3 install -e . cd .. -python3 export.py --weight yolo12n.pt --batch 32 +python3 export.py --weight yolov12n.pt --batch 32 diff --git a/models/cv/object_detection/yolov12/igie/export.py b/models/cv/object_detection/yolov12/igie/export.py index aec62f72..780b9b2a 100644 --- a/models/cv/object_detection/yolov12/igie/export.py +++ b/models/cv/object_detection/yolov12/igie/export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov12/igie/inference.py b/models/cv/object_detection/yolov12/igie/inference.py index cbed1c03..2286d11c 100644 --- a/models/cv/object_detection/yolov12/igie/inference.py +++ b/models/cv/object_detection/yolov12/igie/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh index d2a725f9..14656322 100644 --- a/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh +++ b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh index 0b7d208d..cd954ed2 100644 --- a/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh +++ b/models/cv/object_detection/yolov12/igie/scripts/infer_yolov12_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov12/igie/validator.py b/models/cv/object_detection/yolov12/igie/validator.py index b717b5c4..113693da 100644 --- a/models/cv/object_detection/yolov12/igie/validator.py +++ b/models/cv/object_detection/yolov12/igie/validator.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may -- Gitee