diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/README.md b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5580149c28e035ef09380e900573f659ca8348a8 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/README.md @@ -0,0 +1,35 @@ +## Set up the virtual environment + +`conda create --name mobilenetV1 python=3.9` +Activate it: `conda activate mobilenetV1` + +## Install dependencies + +`pip3 install -r requirements.txt` + +Build the PyTorch plugin, then install torch_aie from the dist directory + +## Download the pth model + +Download the model tar file yourself and place it under the mobilenetv1 directory +Link: https://ascend-repo-modelzoo.obs.cn-east-2.myhuaweicloud.com/model/1_PyTorch_PTH/MobileNetV1/PTH/mobilenet_sgd_rmsprop_69.526.tar + +## Trace the model into a ts file + +Set model_path to the path of your tar model +`python3 export.py --model_path=./mobilenet_sgd_rmsprop_69.526.tar` + +## Data preprocessing + +Set src_path to the path of the dataset images in your environment +`python3 imagenet_torch_preprocess.py --mode_type resnet --src_path /home/pttest_models/imagenet/val --save_path ./prep_dataset` + +## Model inference - accuracy + +Set annotation_file_path to the path of the dataset label file in your environment +`python3 run.py --annotation_file_path /home/pttest_models/imagenet/val_label.txt --result_json_path ./ --json_file_name result_torchaie.json --ts_model_path ./mobilenetv1.ts --input_bin_folder_path ./prep_dataset` + +## Inference performance - ts + +Set --ts_path to the path of the ts file in your environment +`python3 perf.py --mode=ts --ts_path=/onnx/mobilenetv1/mobilenetv1.ts` \ No newline at end of file diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/export.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/export.py new file mode 100644 index 0000000000000000000000000000000000000000..8521411b51e32df4c0657bd27a09b9416ca25b3d --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/export.py @@ -0,0 +1,109 @@ +import os +import sys +import argparse + +import torch +import torch.nn as nn +from collections import OrderedDict + +def parse_args(): + parser = argparse.ArgumentParser(description='Export MobilenetV1 .ts model file') + parser.add_argument('--model_path', help='MobilenetV1 pth file path', type=str, + default='/onnx/mobilenetv1/mobilenet_sgd_rmsprop_69.526.tar' + ) + parser.add_argument('--ts_save_path', help='MobilenetV1 torch script model save path', type=str, + default='mobilenetv1.ts') + + args = parser.parse_args() + return args + +def check_args(args): + if not os.path.exists(args.model_path): + raise FileNotFoundError(f'MobilenetV1 model file {args.model_path} does not exist') + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + + def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + self.model = nn.Sequential( + conv_bn(3, 32, 2), + conv_dw(32, 64, 1), + conv_dw(64, 128, 2), + conv_dw(128, 128, 1), + conv_dw(128, 256, 2), + conv_dw(256, 256, 1), + conv_dw(256, 512, 2), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 1024, 2), + conv_dw(1024, 1024, 1), + nn.AvgPool2d(7), + ) + self.fc = nn.Linear(1024, 1000) + + def forward(self, x): + x = self.model(x) + x = x.view(-1, 1024) + x = self.fc(x) + return x + + +def proc_nodes_module(checkpoint, AttrName): +
new_state_dict = OrderedDict() + for k, v in checkpoint[AttrName].items(): + if k[0:7] == "module.": + name = k[7:] + else: + name = k[0:] + new_state_dict[name] = v + return new_state_dict + +def trace_ts_model(model_path, ts_save_path): + checkpoint = torch.load(model_path, map_location="cpu") + # strip any DataParallel "module." prefixes before loading the weights + new_state_dict = proc_nodes_module(checkpoint, 'state_dict') + + model = Net() + if new_state_dict: + model.load_state_dict(new_state_dict) + model.eval() + + input_data = torch.ones(1, 3, 224, 224) + ts_model = torch.jit.trace(model, input_data) + ts_model.save(ts_save_path) + print(f'MobilenetV1 torch script model saved to {ts_save_path}') + + +if __name__ == "__main__": + args = parse_args() + check_args(args) + trace_ts_model(args.model_path, args.ts_save_path) + print("Finished tracing MobilenetV1 model") \ No newline at end of file diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/imagenet_acc_eval.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/imagenet_acc_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..583340a19f2fc6e99faed85526c906f8bd12d7ba --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/imagenet_acc_eval.py @@ -0,0 +1,184 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import os +import sys +import json +import numpy as np +import time + +np.set_printoptions(threshold=sys.maxsize) + +LABEL_FILE = "HiAI_label.json" + + +def gen_file_name(img_name): + full_name = img_name.split('/')[-1] + index = full_name.rfind('.') + return full_name[:index] + + +def cre_groundtruth_dict(gtfile_path): + """ + :param filename: file contains the imagename and label number + :return: dictionary key imagename, value is label number + """ + img_gt_dict = {} + for gtfile in os.listdir(gtfile_path): + if (gtfile != LABEL_FILE): + with open(os.path.join(gtfile_path, gtfile), 'r') as f: + gt = json.load(f) + ret = gt["image"]["annotations"][0]["category_id"] + img_gt_dict[gen_file_name(gtfile)] = ret + return img_gt_dict + + +def cre_groundtruth_dict_fromtxt(gtfile_path): + """ + :param filename: file contains the imagename and label number + :return: dictionary key imagename, value is label number + """ + img_gt_dict = {} + with open(gtfile_path, 'r')as f: + for line in f.readlines(): + temp = line.strip().split(" ") + imgName = temp[0].split(".")[0] + imgLab = temp[1] + img_gt_dict[imgName] = imgLab + return img_gt_dict + + +def load_statistical_predict_result(filepath): + """ + function: + the prediction esult file data extraction + input: + result file:filepath + output: + n_label:numble of label + data_vec: the probabilitie of prediction in the 1000 + :return: probabilities, numble of label, in_type, color + """ + with open(filepath, 'r')as f: + data = f.readline() + temp = data.strip().split(" ") + n_label = len(temp) + if data == '': + n_label = 0 + data_vec = np.zeros((n_label), dtype=np.float32) + in_type = '' + color = '' + if n_label == 0: + in_type = f.readline() + color = f.readline() + else: + for ind, prob in enumerate(temp): + data_vec[ind] = np.float32(prob) + return data_vec, n_label, in_type, color + + +def create_visualization_statistical_result(prediction_file_path, + result_store_path, json_file_name, + img_gt_dict, topn=5): + """ + :param prediction_file_path: + :param result_store_path: + :param json_file_name: + :param img_gt_dict: + :param topn: + :return: + """ + writer = open(os.path.join(result_store_path, json_file_name), 'w') + table_dict = {} + table_dict["title"] = "Overall statistical evaluation" + table_dict["value"] = [] + + count = 0 + resCnt = 0 + n_labels = 0 + count_hit = np.zeros(topn) + for tfile_name in os.listdir(prediction_file_path): + count += 1 + temp = tfile_name.split('.')[0] + index = temp.rfind('_') + img_name = temp[:index] + filepath = os.path.join(prediction_file_path, tfile_name) + ret = load_statistical_predict_result(filepath) + prediction = ret[0] + n_labels = ret[1] + sort_index = np.argsort(-prediction) + gt = img_gt_dict[img_name] + if (n_labels == 1000): + realLabel = int(gt) + elif (n_labels == 1001): + realLabel = int(gt) + 1 + else: + realLabel = int(gt) + + resCnt = min(len(sort_index), topn) + for i in range(resCnt): + if (str(realLabel) == str(sort_index[i])): + count_hit[i] += 1 + break + + if 'value' not in table_dict.keys(): + print("the item value does not exist!") + else: + table_dict["value"].extend( + [{"key": "Number of images", "value": str(count)}, + {"key": "Number of classes", "value": str(n_labels)}]) + if count == 0: + accuracy = 0 + else: + accuracy = np.cumsum(count_hit) / count + for i in range(resCnt): + table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy", + "value": str( + round(accuracy[i] * 100, 2)) + '%'}) + json.dump(table_dict, writer) + writer.close() + + +if __name__ 
== '__main__': + start = time.time() + try: + # txt file path + folder_davinci_target = sys.argv[1] + # annotation files path, "val_label.txt" + annotation_file_path = sys.argv[2] + # the path to store the results json path + result_json_path = sys.argv[3] + # result json file name + json_file_name = sys.argv[4] + except IndexError: + print("Stopped!") + exit(1) + + if not (os.path.exists(folder_davinci_target)): + print("target file folder does not exist.") + + if not (os.path.exists(annotation_file_path)): + print("Ground truth file does not exist.") + + if not (os.path.exists(result_json_path)): + print("Result folder doesn't exist.") + + img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path) + create_visualization_statistical_result(folder_davinci_target, + result_json_path, json_file_name, + img_label_dict, topn=5) + + elapsed = (time.time() - start) + print("Time used:", elapsed) + diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/imagenet_torch_preprocess.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/imagenet_torch_preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..3938928ac1ada0a0d3e0892da9ede0144432b87a --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/imagenet_torch_preprocess.py @@ -0,0 +1,132 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +import glob +import argparse + +from PIL import Image +import numpy as np +import multiprocessing + + +model_config = { + 'resnet': { + 'resize': 256, + 'centercrop': 224, + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + }, + 'inceptionv3': { + 'resize': 342, + 'centercrop': 299, + 'mean': [0.485, 0.456, 0.406], + 'std': [0.229, 0.224, 0.225], + }, + 'inceptionv4': { + 'resize': 342, + 'centercrop': 299, + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + }, +} + + +def center_crop(img, output_size): + if isinstance(output_size, int): + output_size = (int(output_size), int(output_size)) + image_width, image_height = img.size + crop_height, crop_width = output_size + crop_top = int(round((image_height - crop_height) / 2.)) + crop_left = int(round((image_width - crop_width) / 2.)) + return img.crop((crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)) + + +def resize(img, size, interpolation=Image.BILINEAR): + if isinstance(size, int): + w, h = img.size + if (w <= h and w == size) or (h <= w and h == size): + return img + if w < h: + ow = size + oh = int(size * h / w) + return img.resize((ow, oh), interpolation) + else: + oh = size + ow = int(size * w / h) + return img.resize((ow, oh), interpolation) + else: + return img.resize(size[::-1], interpolation) + + +def gen_input_bin(mode_type_name, file_batches, batch, save_path_name): + i = 0 + for files in file_batches[batch]: + i = i + 1 + print("batch", batch, files, "===", i) + + # RGBA to RGB + image = Image.open(os.path.join(src_path, files)).convert('RGB') + image = resize(image, model_config[mode_type_name]['resize']) # Resize + image = center_crop(image, model_config[mode_type_name]['centercrop']) # CenterCrop + img = np.array(image, dtype=np.float32) + img = img.transpose(2, 0, 1) # ToTensor: HWC -> CHW + img = img / 255. # ToTensor: div 255 + img -= np.array(model_config[mode_type_name]['mean'], dtype=np.float32)[:, None, None] # Normalize: mean + img /= np.array(model_config[mode_type_name]['std'], dtype=np.float32)[:, None, None] # Normalize: std + print(img.shape) + print(img.dtype) + img.tofile(os.path.join(save_path_name, files.split('.')[0].split('/')[-1] + ".bin")) + + +def preprocess(mode_type_name, src_path_name, save_path_name): + files = glob.glob(f"{src_path_name}/*.JPEG") + glob.glob(f"{src_path_name}/*/*.JPEG") + file_batches = [files[i:i + 500] for i in range(0, 50000, 500) if files[i:i + 500] != []] + print(len(file_batches)) + thread_pool = multiprocessing.Pool(int(multiprocessing.cpu_count())) + for batch in range(len(file_batches)): + thread_pool.apply_async(gen_input_bin, args=(mode_type_name, file_batches, batch, save_path_name)) + thread_pool.close() + thread_pool.join() + print("in thread, except will not report! 
please ensure bin files generated.") + + +if __name__ == '__main__': + # if len(sys.argv) < 4: + # raise Exception("usage: python3 xxx.py [model_type] [src_path] [save_path]") + # mode_type = sys.argv[1] + # src_path = sys.argv[2] + # save_path = sys.argv[3] + + parser = argparse.ArgumentParser() + parser.add_argument("--mode_type", default = "resnet", type=str) + parser.add_argument("--src_path", default="/home/ascend/imagenet/val") + parser.add_argument("--save_path", default="./prep_dataset") + args = parser.parse_args() + mode_type = args.mode_type + src_path = args.src_path + save_path = args.save_path + + src_path = os.path.realpath(src_path) + if mode_type not in model_config: + model_type_help = "model type: " + for key in model_config.keys(): + model_type_help += key + model_type_help += ' ' + raise Exception(model_type_help) + if not os.path.isdir(save_path): + os.makedirs(os.path.realpath(save_path)) + preprocess(mode_type, src_path, save_path) + diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/mobilenet-v1_pth2onnx.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/mobilenet-v1_pth2onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..f254c26ddfc1cf0b7fafd9d8a34eeda0813d9609 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/mobilenet-v1_pth2onnx.py @@ -0,0 +1,107 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.onnx + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + + def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + self.model = nn.Sequential( + conv_bn(3, 32, 2), + conv_dw(32, 64, 1), + conv_dw(64, 128, 2), + conv_dw(128, 128, 1), + conv_dw(128, 256, 2), + conv_dw(256, 256, 1), + conv_dw(256, 512, 2), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 1024, 2), + conv_dw(1024, 1024, 1), + nn.AvgPool2d(7), + ) + self.fc = nn.Linear(1024, 1000) + + def forward(self, x): + x = self.model(x) + x = x.view(-1, 1024) + x = self.fc(x) + return x + + +def proc_nodes_module(checkpoint, AttrName): + new_state_dict = OrderedDict() + for k, v in checkpoint[AttrName].items(): + if k[0:7] == "module.": + name = k[7:] + else: + name = k[0:] + new_state_dict[name] = v + return new_state_dict + + +def convert_model_to_onnx(model_state, output_file): + model = Net() + if model_state: + model.load_state_dict(model_state) + model.eval() + input_names = ["image"] + output_names = ["class"] + dynamic_axes = {'image': {0: '-1'}, 'class': {0: '-1'}} + dummy_input = torch.randn(32, 3, 224, 224) # (batch_size, channels, width, height) + torch.onnx.export(model, dummy_input, output_file, input_names=input_names, dynamic_axes=dynamic_axes, + output_names=output_names, opset_version=11, verbose=True) + + +if __name__ == '__main__': + checkpoint_file = sys.argv[1] + output_file = sys.argv[2] + + if os.path.isfile(checkpoint_file): + checkpoint = torch.load(checkpoint_file, map_location='cpu') + print("{} successfully loaded.".format(checkpoint_file)) + model_state = proc_nodes_module(checkpoint, 'state_dict') + else: + print("Failed to load checkpoint from {}! 
Output model with initial state.".format(checkpoint_file)) + model_state = OrderedDict() + convert_model_to_onnx(model_state, output_file) diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/perf.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/perf.py new file mode 100644 index 0000000000000000000000000000000000000000..ba3ebc6b6572cae0b6c8e88284a7d74628486bb0 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/perf.py @@ -0,0 +1,102 @@ +import argparse +import time +from tqdm import tqdm + +import torch +import numpy as np + +import torch_aie +from torch_aie import _enums + +from ais_bench.infer.interface import InferSession + +INPUT_WIDTH = 224 +INPUT_HEIGHT = 224 + +def parse_args(): + args = argparse.ArgumentParser(description="A program that operates in 'om' or 'ts' mode.") + args.add_argument("--mode", choices=["om", "ts"], required=True, help="Specify the mode ('om' or 'ts').") + args.add_argument('--om_path',help='MobilenetV1 om file path', type=str, + default='/onnx/mobilenetv1/mobilenet-v1_bs1.om' + ) + args.add_argument('--ts_path',help='MobilenetV1 ts file path', type=str, + default='/onnx/mobilenetv1/mobilenetv1.ts' + ) + args.add_argument("--batch_size", type=int, default=4, help="batch size.") + args.add_argument("--opt_level", type=int, default=0, help="opt level.") + return args.parse_args() + +if __name__ == '__main__': + infer_times = 100 + om_cost = 0 + pt_cost = 0 + opts = parse_args() + OM_PATH = opts.om_path + TS_PATH = opts.ts_path + BATCH_SIZE = opts.batch_size + OPTS_LEVEL = opts.opt_level + warm_ctr = 10 + + if opts.mode == "om": + om_model = InferSession(0, OM_PATH) + + while warm_ctr: + dummy_input = np.random.randn(BATCH_SIZE, 3, INPUT_WIDTH, INPUT_HEIGHT).astype(np.float32) + output = om_model.infer([dummy_input], 'static', custom_sizes=90000000) + warm_ctr -= 1 + + for _ in tqdm(range(0, infer_times)): + dummy_input = np.random.randn(BATCH_SIZE, 3, INPUT_WIDTH, INPUT_HEIGHT).astype(np.float32) + start = time.time() + output = om_model.infer([dummy_input], 'static', custom_sizes=90000000) + cost = time.time() - start + om_cost += cost + print(f"fps: {infer_times} * {BATCH_SIZE} / {om_cost : .3f} samples/s") + print("om fps: ", infer_times * BATCH_SIZE / om_cost) + + if opts.mode == "ts": + ts_model = torch.jit.load(TS_PATH) + + input_info = [torch_aie.Input((BATCH_SIZE, 3, INPUT_WIDTH, INPUT_HEIGHT))] + + torch_aie.set_device(0) + print("start compile") + torchaie_model = torch_aie.compile( + ts_model, + inputs=input_info, + precision_policy=_enums.PrecisionPolicy.FP16, + soc_version='Ascend310P3', + optimization_level=OPTS_LEVEL, + ) + print("end compile") + torchaie_model.eval() + + dummy_input = np.random.randn(BATCH_SIZE, 3, INPUT_WIDTH, INPUT_HEIGHT).astype(np.float32) + input_tensor = torch.Tensor(dummy_input) + input_tensor = input_tensor.to("npu:0") + loops = 100 + warm_ctr = 10 + + default_stream = torch_aie.npu.default_stream() + time_cost = 0 + + while warm_ctr: + _ = torchaie_model(input_tensor) + default_stream.synchronize() + warm_ctr -= 1 + + for i in range(loops): + t0 = time.time() + _ = torchaie_model(input_tensor) + default_stream.synchronize() + t1 = time.time() + time_cost += (t1 - t0) + # print(i) + + print(f"fps: {loops} * {BATCH_SIZE} / {time_cost : .3f} samples/s") + print("torch_aie fps: ", loops * BATCH_SIZE / time_cost) + from datetime import datetime + current_time = datetime.now() + formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S") + print("Current Time:", 
formatted_time) + torch_aie.finalize() \ No newline at end of file diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/README.md b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/README.md new file mode 100644 index 0000000000000000000000000000000000000000..08c74cbfc147d6d0ea435a20a0392790e7954485 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/README.md @@ -0,0 +1,81 @@ +Implementation of MobileNet, modified from https://github.com/pytorch/examples/tree/master/imagenet. +imagenet data is processed [as described here](https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md#download-the-imagenet-dataset) + + +nohup python main.py -a mobilenet ImageNet-Folder > log.txt & + +Results +- sgd : top1 68.848 top5 88.740 [download](https://pan.baidu.com/s/1nuRcK3Z) +- rmsprop: top1 0.104 top5 0.494 +- rmsprop init from sgd : top1 69.526 top5 88.978 [donwload](https://pan.baidu.com/s/1eRCxYKU) +- paper: top1 70.6 + +Benchmark: + +Titan-X, batchsize = 16 +``` + resnet18 : 0.004030 + alexnet : 0.001395 + vgg16 : 0.002310 +squeezenet : 0.009848 + mobilenet : 0.073611 +``` +Titan-X, batchsize = 1 +``` + resnet18 : 0.003688 + alexnet : 0.001179 + vgg16 : 0.002055 +squeezenet : 0.003385 + mobilenet : 0.076977 +``` + +--------- + +``` +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + + def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + self.model = nn.Sequential( + conv_bn( 3, 32, 2), + conv_dw( 32, 64, 1), + conv_dw( 64, 128, 2), + conv_dw(128, 128, 1), + conv_dw(128, 256, 2), + conv_dw(256, 256, 1), + conv_dw(256, 512, 2), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 1024, 2), + conv_dw(1024, 1024, 1), + nn.AvgPool2d(7), + ) + self.fc = nn.Linear(1024, 1000) + + def forward(self, x): + x = self.model(x) + x = x.view(-1, 1024) + x = self.fc(x) + return x +``` diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/benchmark.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..7db54cf0cce865ffeb0dd360bd0373299d66f2f2 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/benchmark.py @@ -0,0 +1,81 @@ +import time +import torch +import torch.nn as nn +import torch.backends.cudnn as cudnn +import torchvision.models as models +from torch.autograd import Variable + +class MobileNet(nn.Module): + def __init__(self): + super(MobileNet, self).__init__() + + def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + 
self.model = nn.Sequential( + conv_bn( 3, 32, 2), + conv_dw( 32, 64, 1), + conv_dw( 64, 128, 2), + conv_dw(128, 128, 1), + conv_dw(128, 256, 2), + conv_dw(256, 256, 1), + conv_dw(256, 512, 2), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 1024, 2), + conv_dw(1024, 1024, 1), + nn.AvgPool2d(7), + ) + self.fc = nn.Linear(1024, 1000) + + def forward(self, x): + x = self.model(x) + x = x.view(-1, 1024) + x = self.fc(x) + return x + +def speed(model, name): + t0 = time.time() + input = torch.rand(1,3,224,224).cuda() + input = Variable(input, volatile = True) + t1 = time.time() + + model(input) + t2 = time.time() + + model(input) + t3 = time.time() + + print('%10s : %f' % (name, t3 - t2)) + +if __name__ == '__main__': + #cudnn.benchmark = True # This will make network slow ?? + resnet18 = models.resnet18().cuda() + alexnet = models.alexnet().cuda() + vgg16 = models.vgg16().cuda() + squeezenet = models.squeezenet1_0().cuda() + mobilenet = MobileNet().cuda() + + speed(resnet18, 'resnet18') + speed(alexnet, 'alexnet') + speed(vgg16, 'vgg16') + speed(squeezenet, 'squeezenet') + speed(mobilenet, 'mobilenet') diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/main.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/main.py new file mode 100644 index 0000000000000000000000000000000000000000..0a0c43e587b5ddd6eebf72d8e3805a17f29cc06f --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/main.py @@ -0,0 +1,346 @@ +import argparse +import os +import shutil +import time + +import torch +import torch.nn as nn +import torch.nn.parallel +import torch.backends.cudnn as cudnn +import torch.optim +import torch.utils.data +import torchvision.transforms as transforms +import torchvision.datasets as datasets +import torchvision.models as models + + +model_names = sorted(name for name in models.__dict__ + if name.islower() and not name.startswith("__") + and callable(models.__dict__[name])) + +model_names.append('mobilenet') + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('--data', metavar='DIR', default = "/home/jcwang/dataset/imagenet-data", + help='path to dataset') +parser.add_argument('--arch', '-a', metavar='ARCH', default='mobilenet', + choices=model_names, + help='model architecture: ' + + ' | '.join(model_names) + + ' (default: resnet18)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 4)') +parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') +parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('-b', '--batch_size', default=64, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, + metavar='LR', help='initial learning rate') +parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') +parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)') +parser.add_argument('--print-freq', '-p', default=10, type=int, + metavar='N', help='print frequency (default: 10)') +parser.add_argument('--resume', default='./mobilenet_sgd_68.848.pth.tar', type=str, 
metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', + default = 1, + help='evaluate model on validation set') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') + +best_prec1 = 0 + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + + def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True) + ) + + def conv_dw(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.ReLU(inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.ReLU(inplace=True), + ) + + self.model = nn.Sequential( + conv_bn( 3, 32, 2), + conv_dw( 32, 64, 1), + conv_dw( 64, 128, 2), + conv_dw(128, 128, 1), + conv_dw(128, 256, 2), + conv_dw(256, 256, 1), + conv_dw(256, 512, 2), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 512, 1), + conv_dw(512, 1024, 2), + conv_dw(1024, 1024, 1), + nn.AvgPool2d(7), + ) + self.fc = nn.Linear(1024, 1000) + + def forward(self, x): + x = self.model(x) + x = x.view(-1, 1024) + x = self.fc(x) + return x + +def mobilenet(path="./checkpoint.pth.tar"): + net = Net() + state_dict = torch.load(path) + net.load_state_dict(state_dict) + return net + + +def main(): + global args, best_prec1 + args = parser.parse_args() + + # create model + if args.pretrained: + print("=> using pre-trained model '{}'".format(args.arch)) + model = models.__dict__[args.arch](pretrained=True) + else: + print("=> creating model '{}'".format(args.arch)) + if args.arch.startswith('mobilenet'): + model = Net() + print(model) + else: + model = models.__dict__[args.arch]() + + if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): + model.features = torch.nn.DataParallel(model.features) + model.cuda() + else: + model = torch.nn.DataParallel(model).cuda() + + # define loss function (criterion) and optimizer + criterion = nn.CrossEntropyLoss().cuda() + + optimizer = torch.optim.SGD(model.parameters(), args.lr, + momentum=args.momentum, + weight_decay=args.weight_decay) + + # optionally resume from a checkpoint + if args.resume: + if os.path.isfile(args.resume): + print("=> loading checkpoint '{}'".format(args.resume)) + checkpoint = torch.load(args.resume) + args.start_epoch = checkpoint['epoch'] + best_prec1 = checkpoint['best_prec1'] + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + print("=> loaded checkpoint '{}' (epoch {})" + .format(args.resume, checkpoint['epoch'])) + else: + print("=> no checkpoint found at '{}'".format(args.resume)) + + cudnn.benchmark = True + + # Data loading code + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + if args.evaluate: + val_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(valdir, transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])), + batch_size=args.batch_size, shuffle=False, + num_workers=args.workers, pin_memory=True) + validate(val_loader, model, criterion) + return + else: + train_loader = torch.utils.data.DataLoader( + datasets.ImageFolder(traindir, transforms.Compose([ + 
transforms.RandomSizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ])), + batch_size=args.batch_size, shuffle=True, + num_workers=args.workers, pin_memory=True) + + for epoch in range(args.start_epoch, args.epochs): + adjust_learning_rate(optimizer, epoch) + + # train for one epoch + train(train_loader, model, criterion, optimizer, epoch) + + # evaluate on validation set + prec1 = validate(val_loader, model, criterion) + + # remember best prec@1 and save checkpoint + is_best = prec1 > best_prec1 + best_prec1 = max(prec1, best_prec1) + save_checkpoint({ + 'epoch': epoch + 1, + 'arch': args.arch, + 'state_dict': model.state_dict(), + 'best_prec1': best_prec1, + 'optimizer' : optimizer.state_dict(), + }, is_best) + + +def train(train_loader, model, criterion, optimizer, epoch): + batch_time = AverageMeter() + data_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + # switch to train mode + model.train() + + end = time.time() + for i, (input, target) in enumerate(train_loader): + # measure data loading time + data_time.update(time.time() - end) + + target = target.cuda(async=True) + input_var = torch.autograd.Variable(input) + target_var = torch.autograd.Variable(target) + + # compute output + output = model(input_var) + loss = criterion(output, target_var) + + # measure accuracy and record loss + prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) + losses.update(loss.data[0], input.size(0)) + top1.update(prec1[0], input.size(0)) + top5.update(prec5[0], input.size(0)) + + # compute gradient and do SGD step + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + print('Epoch: [{0}][{1}/{2}]\t' + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' + 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( + epoch, i, len(train_loader), batch_time=batch_time, + data_time=data_time, loss=losses, top1=top1, top5=top5)) + + +def validate(val_loader, model, criterion): + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + # switch to evaluate mode + model.eval() + + end = time.time() + for i, (input, target) in enumerate(val_loader): + target = target.cuda(async=True) + input_var = input.cuda() + target_var = target + + # compute output + output = model(input_var) + loss = criterion(output, target_var) + + # measure accuracy and record loss + prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(prec1.item(), input.size(0)) + top5.update(prec5.item(), input.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if i % args.print_freq == 0: + print('Test: [{0}/{1}]\t' + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' + 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( + i, len(val_loader), batch_time=batch_time, loss=losses, + top1=top1, top5=top5)) + + print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}' + .format(top1=top1, top5=top5)) + + return top1.avg + + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): + torch.save(state, filename) + if is_best: + shutil.copyfile(filename, 
'model_best.pth.tar') + + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def adjust_learning_rate(optimizer, epoch): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = args.lr * (0.1 ** (epoch // 30)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +if __name__ == '__main__': + main() diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/run.sh b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..f31ea31a7226955dd8bf6128abcad821126543e1 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/pytorch-mobilenet-v1-master/run.sh @@ -0,0 +1,2 @@ +#python main.py -a alexnet /home/xiaohang/ImageNet/ +nohup python main.py -a mobilenet /home/xiaohang/ImageNet/ > log.txt & diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/requirements.txt b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..796465d58538f4133ce49fd6d42c9d66f4bf3506 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/requirements.txt @@ -0,0 +1,254 @@ +absl-py==1.4.0 +addict==2.4.0 +aenum==3.1.15 +aiohttp==3.8.6 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.0.0 +appdirs==1.4.4 +argon2-cffi==23.1.0 +argon2-cffi-bindings==21.2.0 +arrow==1.3.0 +ascendie==6.3rc2 +astor==0.8.1 +asttokens==2.2.1 +astunparse==1.6.3 +async-lru==2.0.4 +async-timeout==4.0.3 +attrs==23.1.0 +Babel==2.13.0 +backcall==0.2.0 +bce-python-sdk==0.8.95 +beautifulsoup4==4.12.2 +black==21.4b2 +bleach==6.1.0 +blinker==1.7.0 +cachetools==5.3.1 +certifi==2023.5.7 +cffi==1.15.1 +chainer==7.8.1 +charset-normalizer==3.1.0 +click==8.1.7 +click-aliases==1.0.1 +cloudpickle==2.2.1 +cmake==3.26.4 +coloredlogs==15.0.1 +comm==0.1.4 +contourpy==1.2.0 +custom-passes-reduce==0.0.0 +cycler==0.12.1 +debugpy==1.8.0 +decorator==5.1.1 +defusedxml==0.7.1 +easydict==1.7 +exceptiongroup==1.1.3 +executing==1.2.0 +fastjsonschema==2.18.1 +filelock==3.12.2 +Flask==3.0.0 +flask-babel==4.0.0 +flatbuffers==1.12 +fonttools==4.44.0 +fqdn==1.5.1 +frozenlist==1.4.0 +fsspec==2023.9.0 +future==0.18.3 +fvcore==0.1.5.post20221221 +gast==0.4.0 +gitdb==4.0.10 +GitPython==3.1.31 +google-auth==2.22.0 +google-auth-oauthlib==0.4.6 +google-pasta==0.2.0 +greenlet==3.0.1 +grpcio==1.59.2 +h11==0.14.0 +h5py==3.9.0 +httpcore==1.0.2 +httpx==0.25.1 +huggingface-hub==0.17.1 +humanfriendly==10.0 +hydra-core==1.3.2 +idna==3.4 +imageio==2.31.5 +imgaug==0.4.0 +importlib-metadata==6.8.0 +importlib-resources==6.1.1 +iopath==0.1.8 +ipykernel==6.25.2 +ipython==8.14.0 +isoduration==20.11.0 +itsdangerous==2.1.2 +jedi==0.18.2 +Jinja2==3.1.2 +joblib==1.3.2 
+json-tricks==3.17.3 +json5==0.9.14 +jsonpointer==2.4 +jsonschema==4.19.1 +jsonschema-specifications==2023.7.1 +jupyter-events==0.8.0 +jupyter-lsp==2.2.0 +jupyter_client==8.4.0 +jupyter_core==5.4.0 +jupyter_server==2.8.0 +jupyter_server_terminals==0.4.4 +jupyterlab==4.0.7 +jupyterlab-pygments==0.2.2 +jupyterlab_server==2.25.0 +keras==2.9.0 +Keras-Preprocessing==1.1.2 +kiwisolver==1.4.5 +lazy_loader==0.3 +libclang==16.0.6 +lit==16.0.5.post0 +lmdb==1.4.1 +loguru==0.7.2 +Markdown==3.4.4 +markdown-it-py==3.0.0 +MarkupSafe==2.1.3 +matplotlib==3.8.1 +matplotlib-inline==0.1.6 +mdurl==0.1.2 +mistune==3.0.2 +mmcv==2.0.0rc4 +mmdeploy==1.3.0 +mmdeploy-runtime==1.3.0 +mmdeploy-runtime-gpu==1.3.0 +mmdet==3.2.0 +mmengine==0.9.1 +mpmath==1.3.0 +multidict==6.0.4 +mypy-extensions==1.0.0 +nbclient==0.8.0 +nbconvert==7.9.2 +nbformat==5.9.2 +nest-asyncio==1.5.8 +networkx==3.1 +ninja==1.11.1.1 +notebook_shim==0.2.3 +numpy==1.26.1 +oauthlib==3.2.2 +omegaconf==2.3.0 +onnx==1.15.0 +onnx-simplifier==0.4.33 +onnxconverter-common==1.14.0 +onnxoptimizer==0.3.13 +onnxruntime==1.16.1 +onnxruntime-tools==1.7.0 +onnxsim==0.4.10 +opencv-python==4.8.1.78 +opt-einsum==3.3.0 +overrides==7.4.0 +packaging==23.2 +paddle2onnx==1.1.0 +paddlepaddle==2.5.2 +pandas==2.0.2 +pandocfilters==1.5.0 +parso==0.8.3 +pathlib2==2.3.7.post1 +pathspec==0.11.2 +pexpect==4.8.0 +pickleshare==0.7.5 +Pillow==10.1.0 +platformdirs==3.11.0 +portalocker==2.8.2 +prettytable==3.9.0 +prometheus-client==0.17.1 +prompt-toolkit==3.0.39 +protobuf==3.20.2 +psutil==5.9.5 +ptyprocess==0.7.0 +pure-eval==0.2.2 +py-cpuinfo==9.0.0 +py3nvml==0.2.7 +pyarrow==13.0.0 +pyascendie==0.0.0 +pyasn1==0.5.0 +pyasn1-modules==0.3.0 +pyclipper==1.3.0.post5 +pycocotools==2.0.7 +pycparser==2.21 +pycryptodome==3.19.0 +pydot==1.4.2 +Pygments==2.16.1 +pyparsing==3.1.1 +python-dateutil==2.8.2 +python-json-logger==2.0.7 +pytz==2023.3 +PyYAML==6.0.1 +pyzmq==25.1.1 +rarfile==4.1 +referencing==0.30.2 +regex==2023.10.3 +requests==2.31.0 +requests-oauthlib==1.3.1 +responses==0.18.0 +rfc3339-validator==0.1.4 +rfc3986-validator==0.1.1 +rich==13.6.0 +rpds-py==0.10.6 +rsa==4.9 +safetensors==0.3.3 +scikit-image==0.22.0 +scikit-learn==1.3.1 +scipy==1.10.1 +seaborn==0.12.2 +Send2Trash==1.8.2 +sentry-sdk==1.31.0 +Shapely==1.6.4 +six==1.16.0 +skl2onnx==1.15.0 +sklearn==0.0 +smmap==5.0.0 +sniffio==1.3.0 +soupsieve==2.5 +stack-data==0.6.2 +sympy==1.12 +synr==0.5.0 +tabulate==0.9.0 +termcolor==2.3.0 +terminado==0.17.1 +terminaltables==3.1.10 +tf2onnx==1.15.1 +thop==0.1.1.post2209072238 +threadpoolctl==3.2.0 +tifffile==2023.8.30 +timm==0.6.13 +tinycss2==1.2.1 +tokenizers==0.14.1 +toml==0.10.2 +tomli==2.0.1 +torch==2.0.1 +torchaudio==2.0.2 +torchsummary==1.5.1 +torchvision==0.15.2 +tornado==6.3.2 +tqdm==4.64.0 +traitlets==5.9.0 +transformers==4.34.0 +triton==2.0.0 +typeguard==2.13.3 +types-python-dateutil==2.8.19.14 +typing_extensions==4.5.0 +tzdata==2023.3 +uri-template==1.3.0 +urllib3==1.26.16 +virtualenv==20.24.5 +visualdl==2.5.3 +wcwidth==0.2.9 +webcolors==1.13 +webencodings==0.5.1 +websocket-client==1.6.4 +websockets==12.0 +Werkzeug==3.0.1 +wikiextractor==3.0.6 +wrapt==1.15.0 +xmltodict==0.13.0 +xxhash==3.4.1 +yacs==0.1.8 +yapf==0.40.2 +yarl==1.9.2 +zipp==3.17.0 +zope.event==5.0 +zope.interface==6.1 diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/result_torchaie.json b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/result_torchaie.json new file mode 100644 index 0000000000000000000000000000000000000000..c130fbd73174179f60f88bc76caf3f3e6a2cf805 --- /dev/null +++ 
b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/result_torchaie.json @@ -0,0 +1 @@ +{"title": "Overall statistical evaluation", "value": [{"key": "Number of images", "value": "50000"}, {"key": "Number of classes", "value": "1000"}, {"key": "Top1 accuracy", "value": "69.6%"}, {"key": "Top2 accuracy", "value": "80.52%"}, {"key": "Top3 accuracy", "value": "84.88%"}, {"key": "Top4 accuracy", "value": "87.35%"}, {"key": "Top5 accuracy", "value": "89.06%"}]} \ No newline at end of file diff --git a/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/run.py b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/run.py new file mode 100644 index 0000000000000000000000000000000000000000..5ffd5aba6b26b4aa3134e6d06fc95060b53c52a0 --- /dev/null +++ b/AscendIE/TorchAIE/built-in/cv/classification/mobilenetv1/run.py @@ -0,0 +1,195 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import json +import time +from tqdm import tqdm +import argparse + +import numpy as np +import torch + +import torch_aie +from torch_aie import _enums + +np.set_printoptions(threshold=sys.maxsize) + +LABEL_FILE = "HiAI_label.json" + + +def gen_file_name(img_name): + full_name = img_name.split('/')[-1] + index = full_name.rfind('.') + return full_name[:index] + + +def cre_groundtruth_dict(gtfile_path): + """ + :param filename: file contains the imagename and label number + :return: dictionary key imagename, value is label number + """ + img_gt_dict = {} + for gtfile in os.listdir(gtfile_path): + if (gtfile != LABEL_FILE): + with open(os.path.join(gtfile_path, gtfile), 'r') as f: + gt = json.load(f) + ret = gt["image"]["annotations"][0]["category_id"] + img_gt_dict[gen_file_name(gtfile)] = ret + return img_gt_dict + + +def cre_groundtruth_dict_fromtxt(gtfile_path): + """ + :param filename: file contains the imagename and label number + :return: dictionary key imagename, value is label number + """ + img_gt_dict = {} + with open(gtfile_path, 'r')as f: + for line in f.readlines(): + temp = line.strip().split(" ") + imgName = temp[0].split(".")[0] + imgLab = temp[1] + img_gt_dict[imgName] = imgLab + return img_gt_dict + + +def load_statistical_predict_result(filepath): + """ + function: + the prediction esult file data extraction + input: + result file:filepath + output: + n_label:numble of label + data_vec: the probabilitie of prediction in the 1000 + :return: probabilities, numble of label, in_type, color + """ + with open(filepath, 'r')as f: + data = f.readline() + temp = data.strip().split(" ") + n_label = len(temp) + if data == '': + n_label = 0 + data_vec = np.zeros((n_label), dtype=np.float32) + in_type = '' + color = '' + if n_label == 0: + in_type = f.readline() + color = f.readline() + else: + for ind, prob in enumerate(temp): + data_vec[ind] = np.float32(prob) + return data_vec, n_label, in_type, color + + +def create_visualization_statistical_result(result_store_path, json_file_name, + img_gt_dict, ts_model_path, 
input_bin_folder_path, topn=5): + """ + :param result_store_path: + :param json_file_name: + :param img_gt_dict: + :param ts_model_path: + :param input_bin_folder_path: + :param topn: + :return: + """ + writer = open(os.path.join(result_store_path, json_file_name), 'w') + table_dict = {} + table_dict["title"] = "Overall statistical evaluation" + table_dict["value"] = [] + + count = 0 + resCnt = 0 + n_labels = 1000 + count_hit = np.zeros(topn) + + ts_model = torch.jit.load(ts_model_path) + input_info = [torch_aie.Input((1, 3, 224, 224))] + torch_aie.set_device(0) + print("Start compiling...") + torchaie_model = torch_aie.compile( + ts_model, + inputs=input_info, + precision_policy=_enums.PrecisionPolicy.FP32, + soc_version='Ascend310P3' + ) + print("Compile finished!") + torchaie_model.eval() + + list_files_input_bin = os.listdir(input_bin_folder_path) + list_files_input_bin.sort() + + for binfile_name in tqdm(list_files_input_bin): + count += 1 + input_bin_filepath = os.path.join(input_bin_folder_path, binfile_name) + input_np_arr = np.fromfile(input_bin_filepath, dtype=np.float32).reshape((1, 3, 224, 224)) # read each bin back in the same (1, 3, 224, 224) layout the preprocessing script saved it in + input_tensor = torch.tensor(input_np_arr, dtype=torch.float32) + input_tensor = input_tensor.to("npu:0") + pred = torchaie_model.forward(input_tensor) + pred = pred.to("cpu") + sort_index = np.argsort(-pred.detach().numpy())[0] + + img_name = binfile_name.split('.')[0] + gt = img_gt_dict[img_name] + realLabel = int(gt) + resCnt = min(len(sort_index), topn) + for i in range(resCnt): + if (str(realLabel) == str(sort_index[i])): + count_hit[i] += 1 + break + + if 'value' not in table_dict.keys(): + print("the item value does not exist!") + else: + table_dict["value"].extend( + [{"key": "Number of images", "value": str(count)}, + {"key": "Number of classes", "value": str(n_labels)}]) + if count == 0: + accuracy = 0 + else: + accuracy = np.cumsum(count_hit) / count + for i in range(resCnt): + table_dict["value"].append({"key": "Top" + str(i + 1) + " accuracy", + "value": str( + round(accuracy[i] * 100, 2)) + '%'}) + json.dump(table_dict, writer) + writer.close() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--annotation_file_path", default = "/home/ascend/imagenet/val_label.txt", type=str, help="annotation file path") + parser.add_argument("--result_json_path", default="./") + parser.add_argument("--json_file_name", default="result_torchaie.json") + parser.add_argument("--ts_model_path", default="./mobilenetv1.ts") + parser.add_argument("--input_bin_folder_path", default="./prep_dataset") + args = parser.parse_args() + annotation_file_path = args.annotation_file_path + result_json_path = args.result_json_path + json_file_name = args.json_file_name + ts_model_path = args.ts_model_path + input_bin_folder_path = args.input_bin_folder_path + + if not (os.path.exists(annotation_file_path)): + print("Ground truth file does not exist.") + + if not (os.path.exists(result_json_path)): + print("Result folder doesn't exist.") + + img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path) + create_visualization_statistical_result(result_json_path, json_file_name, + img_label_dict, ts_model_path, input_bin_folder_path, topn=5) + +