From fb39ed618f45c4b62a7862e46235c6fda7cc294a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Fri, 1 Sep 2023 07:43:57 +0000
Subject: [PATCH 01/12] create open_clip
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ACL_PyTorch/built-in/foundation_models/open_clip/.keep | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 ACL_PyTorch/built-in/foundation_models/open_clip/.keep

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/.keep b/ACL_PyTorch/built-in/foundation_models/open_clip/.keep
new file mode 100644
index 0000000000..e69de29bb2
-- 
Gitee

From a6edbbab1138077c40e5b6b70c5289298edce6bd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Fri, 1 Sep 2023 07:44:18 +0000
Subject: [PATCH 02/12] add ACL_PyTorch/built-in/foundation_models/open_clip.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 ACL_PyTorch/built-in/foundation_models/open_clip/export.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 ACL_PyTorch/built-in/foundation_models/open_clip/export.py

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/export.py b/ACL_PyTorch/built-in/foundation_models/open_clip/export.py
new file mode 100644
index 0000000000..e69de29bb2
-- 
Gitee

From 42dd42a6b1691abdcac5c818d5bfaf89b7605318 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Fri, 1 Sep 2023 07:47:06 +0000
Subject: [PATCH 03/12] update ACL_PyTorch/built-in/foundation_models/open_clip/export.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 .../foundation_models/open_clip/export.py | 116 ++++++++++++++++++++
 1 file changed, 116 insertions(+)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/export.py b/ACL_PyTorch/built-in/foundation_models/open_clip/export.py
index e69de29bb2..2c8a2e73c3 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/export.py
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/export.py
@@ -0,0 +1,116 @@
+import argparse
+
+import torch
+import torch.onnx
+from PIL import Image
+from onnx import load_model, save_model
+from onnxmltools.utils import convert_float_to_float16
+import onnxruntime
+import open_clip
+
+
+class Encoder(torch.nn.Module):
+    """Wrap a single open_clip encoder so that the exported ONNX graph
+    has exactly one input and returns the unnormalized features."""
+    def __init__(self, clip_model, mode):
+        super().__init__()
+        self.clip_model = clip_model
+        self.mode = mode
+
+    def forward(self, x):
+        if self.mode == "text":
+            return self.clip_model.encode_text(x)
+        return self.clip_model.encode_image(x)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model-arch",
+        default="ViT-B-32",
+        choices=["ViT-B-32", "ViT-B-16", "ViT-L-14", "ViT-L-14-336", "ViT-H-14", "RN50"],
+        help="Specify the architecture (model scale) of the open_clip model to be converted."
+    )
+    parser.add_argument(
+        "--pytorch-ckpt-path",
+        default=None,
+        type=str,
+        help="Path of the input PyTorch open_clip checkpoint. Default to None, which will automatically download the pretrained checkpoint."
+    )
+    parser.add_argument(
+        "--download-root",
+        default=None,
+        type=str,
+        help="If --pytorch-ckpt-path is None, the official pretrained ckpt will be downloaded under the --download-root directory and converted. Default to ~/.cache/clip/ ."
+    )
+    parser.add_argument(
+        "--save-onnx-path",
+        required=True,
+        type=str,
+        help="Path (prefix) of the output converted ONNX text or vision model."
+    )
+    parser.add_argument(
+        "--convert-text",
+        action="store_true",
+        help="Whether to convert the text encoder (text feature extractor) into ONNX."
+    )
+    parser.add_argument(
+        "--convert-vision",
+        action="store_true",
+        help="Whether to convert the vision encoder (vision feature extractor) into ONNX."
+    )
+    parser.add_argument(
+        "--context-length", type=int, default=77, help="The padded length of the input text. Default to 77 for open_clip models."
+    )
+    args = parser.parse_args()
+    return args
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    # build the model and the demo inputs; the pretrained weights are downloaded automatically
+    model, _, preprocess = open_clip.create_model_and_transforms(args.model_arch, pretrained='laion2b_s34b_b79k')
+    tokenizer = open_clip.get_tokenizer(args.model_arch)
+    image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
+    text = tokenizer(["a diagram", "a dog", "a cat"])
+    model.eval()
+    # perform conversions, the ONNX text and vision encoders will be saved into separate files
+    if args.convert_text:
+        # convert the text encoder to an FP32 ONNX model
+        text_fp32_onnx_path = f"{args.save_onnx_path}.txt.fp32.onnx"
+        torch.onnx.export(Encoder(model, "text"),
+                          (text,),
+                          text_fp32_onnx_path,
+                          input_names=['text'],
+                          output_names=['unnorm_text_features'],
+                          export_params=True,
+                          opset_version=13,
+                          verbose=True)
+        # convert the text FP16 ONNX model based on the FP32 model
+        text_fp16_onnx_path = f"{args.save_onnx_path}.txt.fp16.onnx"
+        text_fp32_onnx_model = load_model(text_fp32_onnx_path)
+        text_fp16_onnx_model = convert_float_to_float16(text_fp32_onnx_model, keep_io_types=True, disable_shape_infer=True)
+        save_model(text_fp16_onnx_model,
+                   text_fp16_onnx_path,
+                   convert_attribute=True)
+
+    if args.convert_vision:
+        # convert the vision encoder to an FP32 ONNX model
+        vision_fp32_onnx_path = f"{args.save_onnx_path}.img.fp32.onnx"
+        torch.onnx.export(Encoder(model, "image"),
+                          (image,),
+                          vision_fp32_onnx_path,
+                          input_names=['image'],
+                          output_names=['unnorm_image_features'],
+                          export_params=True,
+                          do_constant_folding=False,
+                          opset_version=13,
+                          verbose=True)
+
+        # convert the vision FP16 ONNX model based on the FP32 model
+        vision_fp16_onnx_path = f"{args.save_onnx_path}.img.fp16.onnx"
+        vision_fp32_onnx_model = load_model(vision_fp32_onnx_path)
+        vision_fp16_onnx_model = convert_float_to_float16(vision_fp32_onnx_model, keep_io_types=True, disable_shape_infer=True)
+        save_model(vision_fp16_onnx_model,
+                   vision_fp16_onnx_path,
+                   convert_attribute=True)
-- 
Gitee
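> Note: export.py imports onnxruntime but never calls it. The sketch below shows the parity check that import suggests, run after export.py; it assumes the readme's `--save-onnx-path ./open_clip` prefix and the CLIP.png demo image from the open_clip repo, and is not part of the delivered scripts.

```
import numpy as np
import onnxruntime
import torch
from PIL import Image
import open_clip

# rebuild the demo inputs exactly as export.py does
model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
tokenizer = open_clip.get_tokenizer('ViT-B-32')
image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
text = tokenizer(["a diagram", "a dog", "a cat"])

# PyTorch reference: unnormalized features, matching the ONNX output names
model.eval()
with torch.no_grad():
    ref_text = model.encode_text(text).numpy()
    ref_image = model.encode_image(image).numpy()

# run the exported FP32 graphs and compare against PyTorch
sess_t = onnxruntime.InferenceSession("open_clip.txt.fp32.onnx")
onnx_text = sess_t.run(None, {"text": text.numpy()})[0]
sess_i = onnxruntime.InferenceSession("open_clip.img.fp32.onnx")
onnx_image = sess_i.run(None, {"image": image.numpy()})[0]

print("text max abs diff:", np.abs(onnx_text - ref_text).max())
print("image max abs diff:", np.abs(onnx_image - ref_image).max())
```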
From 5ff6b27a5ba0c7522b4fd553adcc2f88e1647cc7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:10:10 +0000
Subject: [PATCH 04/12] add ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 .../foundation_models/open_clip/readme.md | 287 ++++++++++++++++++
 1 file changed, 287 insertions(+)
 create mode 100644 ACL_PyTorch/built-in/foundation_models/open_clip/readme.md

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
new file mode 100644
index 0000000000..bd48639d88
--- /dev/null
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -0,0 +1,287 @@
+# OPEN_CLIP Model Inference Guide
+
+
+- [Overview](#ZH-CN_TOPIC_0000001172161501)
+
+  - [Input/Output Data](#section540883920406)
+
+- [Inference Environment](#ZH-CN_TOPIC_0000001126281702)
+
+- [Quick Start](#ZH-CN_TOPIC_0000001126281700)
+
+  - [Get the Source Code](#section4622531142816)
+  - [Prepare the Data](#section183221994411)
+  - [Model Inference](#section741711594517)
+
+- [Inference Performance & Accuracy](#ZH-CN_TOPIC_0000001172201573)
+
+  ******
+
+
+# Overview
+
+The goal of this repository is to enable training of models with contrastive image-text supervision and to study their properties, such as robustness to distribution shift. Our starting point is an implementation of CLIP that matches the accuracy of the original CLIP model when trained on the same dataset.
+
+
+- Reference implementation:
+
+  ```
+  url=https://github.com/mlfoundations/open_clip
+  commit_id=c22a8ecaf95ace2e1ac785e3384689c03754bd40
+  code_path=built-in/foundation_models/open_clip
+  ```
+
+
+## Input/Output Data
+
+- Input data
+
+  | Input  | Data Type | Shape                     | Format |
+  | ------ | --------- | ------------------------- | ------ |
+  | input1 | FP32      | batchsize x 3 x 224 x 224 | NCHW   |
+  | input2 | INT64     | 3 x 77                    | ND     |
+
+- Output data
+
+  | Output  | Data Type | Shape   | Format |
+  | ------- | --------- | ------- | ------ |
+  | output1 | FLOAT32   | 1 x 512 | ND     |
+  | output2 | FLOAT32   | 3 x 512 | ND     |
+
+# Inference Environment
+
+- The model requires the following plugins and drivers
+
+  **Table 1** Version compatibility
+
+  | Component         | Version                                      | Setup Guide |
+  | ----------------- | -------------------------------------------- | ----------- |
+  | Firmware & driver | 1.0.17 (NPU driver/firmware version 6.0.RC1) | [PyTorch inference environment setup](https://www.hiascend.com/document/detail/zh/ModelZoo/pytorchframework/pies) |
+  | CANN              | 6.3.RC1                                      | -           |
+  | Python            | 3.7.5                                        | -           |
+  | PyTorch           | 1.7.0                                        | -           |
+
+
+# Quick Start
+
+## Get the Source Code
+
+1. Install the dependencies.
+
+   ```
+   pip install -r requirements.txt
+   ```
+
+2. Get the source code.
+   1. Install the open-source repository
+   ```
+   git clone https://github.com/mlfoundations/open_clip
+   cd open_clip
+   git reset --hard c22a8ecaf95ace2e1ac785e3384689c03754bd40
+
+
+   ```
+
+## Prepare the Demo Data
+1. Data preprocessing
+   ```
+   python3 preprocess.py --convert-text --save-onnx-path ./open_clip --convert-vision
+
+   ```
+## Model Inference
+
+1. Model conversion.
+
+   Run export.py to download the weights automatically and convert them to .onnx files, then use the ATC tool to convert the .onnx files into offline inference .om model files.
+   2. Export the ONNX files.
+   ```
+      python3 export.py --convert-text --save-onnx-path ./open_clip --convert-vision
+
+   ```
+   - Parameter description:
+   - --convert-text: export the text encoder
+   - --convert-vision: export the vision encoder
+   - --save-onnx-path: path prefix for the saved ONNX files
+
+   3. Convert the ONNX models to OM models with the ATC tool.
+
+      1. Configure environment variables.
+
+         ```
+         source /usr/local/Ascend/ascend-toolkit/set_env.sh
+         ```
+
+      2. Run the command to query the chip name ($\{chip\_name\}).
+
+         ```
+         npu-smi info
+         # The chip name of this device is Ascend310P3 (replace with your own); the output looks like:
+         +-------------------+-----------------+------------------------------------------------------+
+         | NPU Name          | Health          | Power(W)   Temp(C)           Hugepages-Usage(page)   |
+         | Chip Device       | Bus-Id          | AICore(%)  Memory-Usage(MB)                          |
+         +===================+=================+======================================================+
+         | 0 310P3           | OK              | 15.8       42                0 / 0                   |
+         | 0 0               | 0000:82:00.0    | 0          1074 / 21534                              |
+         +===================+=================+======================================================+
+         | 1 310P3           | OK              | 15.4       43                0 / 0                   |
+         | 0 1               | 0000:89:00.0    | 0          1070 / 21534                              |
+         +===================+=================+======================================================+
+         ```
+
+      3. Run the ATC commands.
+         (1) Convert the text model
+         ```
+
+         atc --framework=5 \
+             --model=open_clip.txt.fp32.onnx \
+             --output=open_clip.txt.fp32 \
+             --input_format=NCHW \
+             --input_shape="text:3,77" \
+             --log=debug \
+             --soc_version=Ascend310P3
+
+
+         ```
+         (2) Convert the image model
+         ```
+
+         atc --framework=5 \
+             --model=open_clip.img.fp32.onnx \
+             --output=open_clip.img.fp32 \
+             --input_format=NCHW \
+             --input_shape="image:1,3,224,224" \
+             --log=debug \
+             --soc_version=Ascend310P3
+
+
+         ```
+         - Parameter description:
+
+           - --model: the ONNX model file.
+           - --framework: 5 stands for ONNX models.
+           - --output: the output OM model.
+           - --input\_format: format of the input data.
+           - --input\_shape: shape of the input data.
+           - --log: log level.
+           - --soc\_version: processor model.
+           - --out_nodes: output nodes
+
+   After a successful run, the cascadeR101dcn.om model file is generated.
+
+2. Run inference and verification.
+
+   1. Install the ais_bench inference tool.
+
+      Visit the [ais_bench inference tool](https://gitee.com/ascend/tools/tree/master/ais-bench_workload/tool/ais_bench) repository and install the tool following its readme.
+
+   2. Run inference.
+
+
+      ```
+      python -m ais_bench --model ./cascadeR101dcn.om\
+          --input ./val2017_bin/\
+          --output ./\
+          --batchsize 1\
+          --outfmt BIN\
+          --output_dirname result
+      ```
+
+      - Parameter description:
+
+        - --model: path to the OM model.
+        - --input: directory holding the preprocessed bin files
+        - --output: directory for the inference results
+        - --batchsize: number of samples fed to the model per run
+        - --outfmt: format of the inference result data
+        - --output_dirname: subdirectory for the output results
+        By default, the inference output is stored under result in the current directory.
+
+
+   3. Accuracy verification.
+
+      Run get_info.py to generate the image data file
+
+      ```
+      python get_info.py jpg ./coco/val2017 coco2017_jpg.info
+      ```
+      - Parameter description:
+
+        - first argument: the original dataset
+        - second argument: the image data info file
+
+      Call "cascadercnn-dcn_postprocess.py" to evaluate the model accuracy.
+
+      ```
+      python cascadercnn-dcn_postprocess.py --bin_data_path=result --prob_thres=0.05 --ifShowDetObj --det_results_path=detection-results --test_annotation=coco2017_jpg.info
+      ```
+      - Parameter description:
+
+        - --bin_data_path: inference results.
+        - --test_annotation: original image info file.
+        - --det_results_path: post-processing output.
+        - --ifShowDetObj: whether to draw the boxes on the images.
+        - --prob_thres: confidence threshold for detection boxes
+
+      Evaluating the mAP requires the official pycocotools; first convert the post-processed txt files into the standard COCO json format for accuracy evaluation.
+
+      ```
+      python txt_to_json.py --npu_txt_path detection-results --json_output_file coco_detection_aisInfer_result
+      ```
+      - Parameter description:
+
+        - --npu_txt_path: post-processing output
+        - --json_output_file: output json
+
+      Call the coco_eval.py script to produce a detailed evaluation report of the inference results.
+
+      ```
+      python coco_eval.py --detection_result coco_detection_aisInfer_result.json --ground_truth ./coco/annotations/instances_val2017.json
+      ```
+      - Parameter description:
+
+        - --detection_result: output json
+        - --ground_truth: ground-truth labels
+
+
+   4. Performance verification.
+
+      The pure-inference mode of the ais_bench tool can be used to measure the performance of the om models at different batch sizes; reference command:
+
+      ```
+      python -m ais_bench --model ./cascadeR101dcn.om --loop 100 --batchsize 1
+      ```
+
+      - Parameter description:
+
+        - --model: the om model
+        - --batchsize: number of samples per run
+        - --loop: number of loops
+
+
+
+# Inference Performance & Accuracy
+
+Inference is performed through the ACL interfaces; refer to the data below for performance.
+
+1. Accuracy comparison
+
+   | Model                 | batchsize | Accuracy        |
+   | --------------------- | --------- | --------------- |
+   | Cascade_rcnn_r101-fpn | 1         | bbox_mAP = 0.45 |
+
+2. Performance comparison
+
+   | batchsize | 310*4 perf | 310P3 perf | 310B1 perf |
+   | ---- | ---- | ---- | ---- |
+   | 1 | 2.6 | 3.8 | 0.853 |
\ No newline at end of file
-- 
Gitee
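> Note: the ATC `--input_shape` values above come directly from the demo inputs. A minimal sketch to confirm them locally; no pretrained download is needed to obtain the transforms, and CLIP.png is the demo image from the open_clip repo, as in export.py:

```
import torch
from PIL import Image
import open_clip

# the transforms and tokenizer do not require pretrained weights
_, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32')
tokenizer = open_clip.get_tokenizer('ViT-B-32')

image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
text = tokenizer(["a diagram", "a dog", "a cat"])

assert tuple(image.shape) == (1, 3, 224, 224)  # matches "image:1,3,224,224"
assert tuple(text.shape) == (3, 77)            # matches "text:3,77"
print(image.dtype, text.dtype)                 # torch.float32, torch.int64
```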
From 28017e46fc8bb0bc348bdbf046168af2fa39ae85 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:19:49 +0000
Subject: [PATCH 05/12] add ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 .../foundation_models/open_clip/preprocess.py | 48 ++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py b/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py
new file mode 100644
index 0000000000..1bdbd955bf
--- /dev/null
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py
@@ -0,0 +1,48 @@
+import torch
+import numpy as np
+from PIL import Image
+import open_clip
+import argparse
+model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
+tokenizer = open_clip.get_tokenizer('ViT-B-32')
+image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
+text = tokenizer(["a diagram", "a dog", "a cat"])
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--image_features_path",
+        default='./npy/image_features',
+        type=str,
+        help="Path of the image_features file."
+    )
+    parser.add_argument(
+        "--text_features_path",
+        default='./npy/text_features',
+        type=str,
+        help="Path of the text_features file."
+    )
+    args = parser.parse_args()
+    return args
+
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    # save the raw model inputs so they can be fed to the om models with ais_bench
+    np.save("./npy/image.npy", image.numpy())
+    np.save("./npy/text.npy", text.numpy())
+    with torch.no_grad(), torch.cuda.amp.autocast():
+        image_features = model.encode_image(image)
+        text_features = model.encode_text(text)
+        # save the unnormalized features as the golden reference data
+        np.save(args.image_features_path, image_features.numpy())
+        np.save(args.text_features_path, text_features.numpy())
+        image_features /= image_features.norm(dim=-1, keepdim=True)
+        text_features /= text_features.norm(dim=-1, keepdim=True)
+
+        text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
+
+        print("Label probs:", text_probs)  # prints: [[1., 0., 0.]]
+
-- 
Gitee
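> Note: preprocess.py stores the unnormalized encoder outputs as golden data. A minimal numpy-only sketch of what any downstream check should reproduce, assuming preprocess.py has already been run with the readme's paths:

```
import numpy as np

# recompute the demo label probabilities from the golden features
image_features = np.load("./npy/image_features.npy").astype(np.float32)  # 1 x 512
text_features = np.load("./npy/text_features.npy").astype(np.float32)    # 3 x 512

# L2-normalize, then softmax over the scaled similarities, as preprocess.py does
image_features /= np.linalg.norm(image_features, axis=-1, keepdims=True)
text_features /= np.linalg.norm(text_features, axis=-1, keepdims=True)

logits = 100.0 * image_features @ text_features.T
probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs /= probs.sum(axis=-1, keepdims=True)
print("Label probs:", probs)  # expected to match preprocess.py: [[1., 0., 0.]]
```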
From 4df07d6d590b7b167f97c02a4237e5d6318d6ba7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:31:34 +0000
Subject: [PATCH 06/12] update ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 .../built-in/foundation_models/open_clip/preprocess.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py b/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py
index 1bdbd955bf..47bd6fa6aa 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/preprocess.py
@@ -3,11 +3,6 @@ import numpy as np
 from PIL import Image
 import open_clip
 import argparse
-model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
-tokenizer = open_clip.get_tokenizer('ViT-B-32')
-image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
-text = tokenizer(["a diagram", "a dog", "a cat"])
-
 
 def parse_args():
     parser = argparse.ArgumentParser()
@@ -31,6 +26,10 @@
 if __name__ == '__main__':
     args = parse_args()
+    model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
+    tokenizer = open_clip.get_tokenizer('ViT-B-32')
+    image = preprocess(Image.open("CLIP.png")).unsqueeze(0)
+    text = tokenizer(["a diagram", "a dog", "a cat"])
     # save the raw model inputs so they can be fed to the om models with ais_bench
     np.save("./npy/image.npy", image.numpy())
     np.save("./npy/text.npy", text.numpy())
-- 
Gitee

From 7dcacf08d87daa6c1eb8b2beb114e696ead6e183 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:42:17 +0000
Subject: [PATCH 07/12] update ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 .../foundation_models/open_clip/readme.md | 105 ++++--------------
 1 file changed, 19 insertions(+), 86 deletions(-)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
index bd48639d88..b161c13993 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -94,9 +94,13 @@
 ## Prepare the Demo Data
 1. Data preprocessing
    ```
-   python3 preprocess.py --convert-text --save-onnx-path ./open_clip --convert-vision
-
+        mkdir npy
+        python3 preprocess.py --image_features_path ./npy/image_features.npy --text_features_path ./npy/text_features.npy
    ```
+   - Parameter description:
+   - --image_features_path: save path for the image_features file
+   - --text_features_path: save path for the text_features file
+
 ## Model Inference
@@ -176,7 +180,6 @@
            - --soc\_version: processor model.
            - --out_nodes: output nodes
 
-   After a successful run, the cascadeR101dcn.om model file is generated.
 
 2. Run inference and verification.
@@ -188,14 +191,21 @@
 
       ```
-      python -m ais_bench --model ./cascadeR101dcn.om\
-          --input ./val2017_bin/\
-          --output ./\
-          --batchsize 1\
-          --outfmt BIN\
+      python -m ais_bench --model ./open_clip.img.fp32.om \
+          --input ./npy/image.npy \
+          --output ./ \
+          --batchsize 1 \
+          --outfmt BIN \
+          --output_dirname result
+      ```
+
+      ```
+      python -m ais_bench --model ./open_clip.txt.fp32.om \
+          --input ./npy/text.npy \
+          --output ./ \
+          --batchsize 1 \
+          --outfmt BIN \
           --output_dirname result
       ```
-
       - Parameter description:
 
         - --model: path to the OM model.
@@ -207,81 +217,4 @@
        - --outfmt: format of the inference result data
        - --output_dirname: subdirectory for the output results
        By default, the inference output is stored under result in the current directory.
 
-
-   3. Accuracy verification.
-
-      Run get_info.py to generate the image data file
-
-      ```
-      python get_info.py jpg ./coco/val2017 coco2017_jpg.info
-      ```
-      - Parameter description:
-
-        - first argument: the original dataset
-        - second argument: the image data info file
-
-      Call "cascadercnn-dcn_postprocess.py" to evaluate the model accuracy.
-
-      ```
-      python cascadercnn-dcn_postprocess.py --bin_data_path=result --prob_thres=0.05 --ifShowDetObj --det_results_path=detection-results --test_annotation=coco2017_jpg.info
-      ```
-      - Parameter description:
-
-        - --bin_data_path: inference results.
-        - --test_annotation: original image info file.
-        - --det_results_path: post-processing output.
-        - --ifShowDetObj: whether to draw the boxes on the images.
-        - --prob_thres: confidence threshold for detection boxes
-
-      Evaluating the mAP requires the official pycocotools; first convert the post-processed txt files into the standard COCO json format for accuracy evaluation.
-
-      ```
-      python txt_to_json.py --npu_txt_path detection-results --json_output_file coco_detection_aisInfer_result
-      ```
-      - Parameter description:
-
-        - --npu_txt_path: post-processing output
-        - --json_output_file: output json
-
-      Call the coco_eval.py script to produce a detailed evaluation report of the inference results.
-
-      ```
-      python coco_eval.py --detection_result coco_detection_aisInfer_result.json --ground_truth ./coco/annotations/instances_val2017.json
-      ```
-      - Parameter description:
-
-        - --detection_result: output json
-        - --ground_truth: ground-truth labels
-
-
-   4. Performance verification.
-
-      The pure-inference mode of the ais_bench tool can be used to measure the performance of the om models at different batch sizes; reference command:
-
-      ```
-      python -m ais_bench --model ./cascadeR101dcn.om --loop 100 --batchsize 1
-      ```
-
-      - Parameter description:
-
-        - --model: the om model
-        - --batchsize: number of samples per run
-        - --loop: number of loops
-
-
-
-# Inference Performance & Accuracy
-
-Inference is performed through the ACL interfaces; refer to the data below for performance.
-
-1. Accuracy comparison
-
-   | Model                 | batchsize | Accuracy        |
-   | --------------------- | --------- | --------------- |
-   | Cascade_rcnn_r101-fpn | 1         | bbox_mAP = 0.45 |
-
-2. Performance comparison
-
-   | batchsize | 310*4 perf | 310P3 perf | 310B1 perf |
-   | ---- | ---- | ---- | ---- |
-   | 1 | 2.6 | 3.8 | 0.853 |
\ No newline at end of file
-- 
Gitee
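> Note: with `--outfmt BIN`, ais_bench writes raw little-endian float buffers. A minimal sketch for loading them back, using the output shapes from the readme's I/O table; the result file names below are hypothetical, since ais_bench derives them from the input file names, so check the actual result directory:

```
import numpy as np

# om outputs are FLOAT32 per the readme's output table
image_out = np.fromfile("./result/image_0.bin", dtype=np.float32).reshape(1, 512)
text_out = np.fromfile("./result/text_0.bin", dtype=np.float32).reshape(3, 512)
print(image_out.shape, text_out.shape)
```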
From 6270af23e386ea650dc18b24c0fca0d75e4fc847 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:55:07 +0000
Subject: [PATCH 08/12] update ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 .../foundation_models/open_clip/readme.md | 42 ++++++++----------
 1 file changed, 18 insertions(+), 24 deletions(-)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
index b161c13993..a680a88e09 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -87,8 +87,6 @@
    git clone https://github.com/mlfoundations/open_clip
    cd open_clip
    git reset --hard c22a8ecaf95ace2e1ac785e3384689c03754bd40
-
-
    ```

 ## Prepare the Demo Data
@@ -105,10 +105,10 @@
 
    Run export.py to download the weights automatically and convert them to .onnx files, then use the ATC tool to convert the .onnx files into offline inference .om model files.
    2. Export the ONNX files.
-   ```
+      ```
       python3 export.py --convert-text --save-onnx-path ./open_clip --convert-vision
 
-   ```
+      ```
    - Parameter description:
    - --convert-text: export the text encoder
    - --convert-vision: export the vision encoder
    - --save-onnx-path: path prefix for the saved ONNX files
@@ -144,30 +142,25 @@
       3. Run the ATC commands.
         (1) Convert the text model
-         ```
-
-         atc --framework=5 \
-             --model=open_clip.txt.fp32.onnx \
-             --output=open_clip.txt.fp32 \
-             --input_format=NCHW \
-             --input_shape="text:3,77" \
-             --log=debug \
-             --soc_version=Ascend310P3
-
-
-         ```
+         ```
+         atc --framework=5 \
+             --model=open_clip.txt.fp32.onnx \
+             --output=open_clip.txt.fp32 \
+             --input_format=NCHW \
+             --input_shape="text:3,77" \
+             --log=debug \
+             --soc_version=Ascend310P3
+         ```
          (2) Convert the image model
-         ```
-
-         atc --framework=5 \
-             --model=open_clip.img.fp32.onnx \
-             --output=open_clip.img.fp32 \
-             --input_format=NCHW \
-             --input_shape="image:1,3,224,224" \
-             --log=debug \
-             --soc_version=Ascend310P3
-
-
-         ```
+         ```
+         atc --framework=5 \
+             --model=open_clip.img.fp32.onnx \
+             --output=open_clip.img.fp32 \
+             --input_format=NCHW \
+             --input_shape="image:1,3,224,224" \
+             --log=debug \
+             --soc_version=Ascend310P3
+         ```
          - Parameter description:
-- 
Gitee

From b686db4638cde3952da3e98e776d168236ceda7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:56:02 +0000
Subject: [PATCH 09/12] update ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 ACL_PyTorch/built-in/foundation_models/open_clip/readme.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
index a680a88e09..42bfbb9143 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -91,10 +91,12 @@
 ## Prepare the Demo Data
 1. Data preprocessing
+
    ```
         mkdir npy
         python3 preprocess.py --image_features_path ./npy/image_features.npy --text_features_path ./npy/text_features.npy
    ```
+
    - Parameter description:
    - --image_features_path: save path for the image_features file
    - --text_features_path: save path for the text_features file
-- 
Gitee

From 4a0685bbbaa6b9dcea9ef0bc7bc22086cb70ee52 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:56:25 +0000
Subject: [PATCH 10/12] update ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 ACL_PyTorch/built-in/foundation_models/open_clip/readme.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
index 42bfbb9143..7b89e9d275 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -93,6 +93,7 @@
    ```
         mkdir npy
         python3 preprocess.py --image_features_path ./npy/image_features.npy --text_features_path ./npy/text_features.npy
+
    ```
 
    - Parameter description:
-- 
Gitee

From ae439d9eb30ea15b3cb6fe3fa1cfee7602b4036f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:57:04 +0000
Subject: [PATCH 11/12] update ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 ACL_PyTorch/built-in/foundation_models/open_clip/readme.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
index 7b89e9d275..c7d828ea1b 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -82,7 +82,6 @@
    ```
 
 2. Get the source code.
-   1. Install the open-source repository
    ```
    git clone https://github.com/mlfoundations/open_clip
    cd open_clip
    git reset --hard c22a8ecaf95ace2e1ac785e3384689c03754bd40
@@ -90,6 +89,7 @@
    ```
 
 ## Prepare the Demo Data
+
 1. Data preprocessing
 
    ```
@@ -98,7 +98,7 @@
 
    ```
 
-    - Parameter description:
+   - Parameter description:
    - --image_features_path: save path for the image_features file
    - --text_features_path: save path for the text_features file
-- 
Gitee

From 715601a2ab079defbad65f06ffa79e55fc93650c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=BF=8E=E6=8E=A5=E5=85=89=E8=BE=89=E5=B2=81=E6=9C=88?=
Date: Mon, 4 Sep 2023 02:58:38 +0000
Subject: [PATCH 12/12] update ACL_PyTorch/built-in/foundation_models/open_clip/readme.md.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 迎接光辉岁月
---
 ACL_PyTorch/built-in/foundation_models/open_clip/readme.md | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
index c7d828ea1b..91116f4f7b 100644
--- a/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
+++ b/ACL_PyTorch/built-in/foundation_models/open_clip/readme.md
@@ -93,14 +93,11 @@
 1. Data preprocessing
 
    ```
-        mkdir npy
-        python3 preprocess.py --image_features_path ./npy/image_features.npy --text_features_path ./npy/text_features.npy
+   mkdir npy
+   python3 preprocess.py --image_features_path ./npy/image_features.npy --text_features_path ./npy/text_features.npy
 
    ```
-
-   - Parameter description:
-   - --image_features_path: save path for the image_features file
-   - --text_features_path: save path for the text_features file
 
 ## Model Inference
-- 
Gitee
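> Note: after these patches, the readme no longer carries an accuracy-verification step for the converted om models. A minimal sketch of one way to perform it, comparing the om outputs against the golden features saved by preprocess.py; the BIN file names are hypothetical and should be checked against the actual result directory:

```
import numpy as np

def cosine_sim(a, b):
    """Row-wise cosine similarity between two feature matrices."""
    a = a / np.linalg.norm(a, axis=-1, keepdims=True)
    b = b / np.linalg.norm(b, axis=-1, keepdims=True)
    return (a * b).sum(axis=-1)

# golden (unnormalized) features from preprocess.py
golden_img = np.load("./npy/image_features.npy").astype(np.float32)
golden_txt = np.load("./npy/text_features.npy").astype(np.float32)

# om outputs read back from the ais_bench BIN files (names hypothetical)
npu_img = np.fromfile("./result/image_0.bin", dtype=np.float32).reshape(1, 512)
npu_txt = np.fromfile("./result/text_0.bin", dtype=np.float32).reshape(3, 512)

print("image feature cosine similarity:", cosine_sim(golden_img, npu_img))
print("text feature cosine similarity:", cosine_sim(golden_txt, npu_txt))
# a common pass criterion for this kind of check is similarity > 0.99 per row
```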