diff --git a/Samples/ClassficationRetrainingAndInfer/README.md b/Samples/ClassficationRetrainingAndInfer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..140ef8bb036bd38674c18308255c56c6319738f7
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/README.md
@@ -0,0 +1,160 @@
+# Gender Classification Training and Inference
+
+#### Sample Introduction
+
+This sample fine-tunes a pretrained ResNet-18 model on a gender classification dataset to recognize a person's gender, covering the whole workflow from training through to om-model inference.
+
+#### Sample Download
+
+The source code can be obtained in either of the following two ways; pick one.
+
+- Download via the command line (**longer download, but simple steps**).
+
+  ```
+  # Log in to the developer board and run the following as the HwHiAiUser user to clone the repository.
+  cd ${HOME}
+  git clone https://gitee.com/ascend/EdgeAndRobotics.git
+  # Switch to the sample directory
+  cd EdgeAndRobotics/Samples/ClassficationRetrainingAndInfer
+  ```
+
+- Download as a ZIP archive (**shorter download, but slightly more involved steps**).
+
+  ```
+  # 1. In the upper-right corner of the repository page, open the [Clone/Download] drop-down and choose [Download ZIP].
+  # 2. Upload the ZIP archive to the home directory of a normal user on the developer board, e.g. ${HOME}/EdgeAndRobotics-master.zip.
+  # 3. In the development environment, run the following commands to unpack the archive.
+  cd ${HOME}
+  unzip EdgeAndRobotics-master.zip
+  # 4. Switch to the sample directory
+  cd EdgeAndRobotics-master/Samples/ClassficationRetrainingAndInfer
+  ```
+
+#### Preparation
+
+- The model in this sample requires PyTorch 2.1.0 and torchvision 0.16.0. Install PyTorch and the torch_npu plugin as described in the [Installing PyTorch](https://www.hiascend.com/document/detail/zh/canncommercial/700/envdeployment/instg/instg_0046.html) section.
+  ```
+  # torch_npu is normally built from source, which can be slow; this sample provides a prebuilt torch_npu wheel for Python 3.9 and torch 2.1.
+  wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/wanzutao/torch_npu-2.1.0rc1-cp39-cp39-linux_aarch64.whl
+
+  # Install it with pip
+  pip3 install torch_npu-2.1.0rc1-cp39-cp39-linux_aarch64.whl
+  ```
+
+- The model also depends on a few other libraries (see the requirements.txt file in this sample directory for the full list). Install them with:
+
+  ```
+  pip3 install -r requirements.txt  # for PyTorch 2.1
+  ```
+
+- Configure the environment variables needed for offline inference.
+
+  ```
+  # Paths to the headers and libraries the program is compiled against
+  export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest
+  export NPU_HOST_LIB=$DDK_PATH/runtime/lib64/stub
+  ```
+
+- Install the ACLLite library needed for offline inference.
+
+  Install ACLLite as described in the [ACLLite repository](https://gitee.com/ascend/ACLLite).
+
+
+#### Model Training
+
+1. Log in to the developer board as the HwHiAiUser user and switch to the sample directory.
+2. Prepare the dataset.
+   ```
+   cd dataset
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/wanzutao/gender.zip
+   unzip gender.zip
+   ```
+
+3. Set environment variables to reduce the memory used during operator compilation.
+   ```
+   export TE_PARALLEL_COMPILER=1
+   export MAX_COMPILE_CORE_NUMBER=1
+   ```
+4. Run the training script (an optional sanity-check sketch follows this list).
+
+   ```
+   cd ..
+   python3 main.py
+   ```
+   After training completes, the weight files are saved in the models directory, and the training accuracy and performance figures are printed.
+
+   The results below were measured on a single device with batch_size=8:
+   | NAME | Acc@1 | FPS | Epochs | AMP_Type | Torch_Version |
+   | :----: | :---: | :---: | :----: | :------: | :-----------: |
+   | 1p-NPU | 95.59 | 4 | 10 | O2 | 2.1 |
+
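+As referenced in step 4 above, the trained weights can be sanity-checked on the NPU before moving on to offline inference. The snippet below is a minimal sketch, not part of the sample scripts: it rebuilds the network the same way main.py does, loads models/model_best.pth.tar and models/labels.txt, and classifies a single image. It assumes Pillow is available (it is installed as a dependency of torchvision); test.jpg is a placeholder name, so substitute your own image path.
+
+```
+import torch
+import torch_npu
+import torchvision.models as models
+import torchvision.transforms as transforms
+from PIL import Image
+
+device = torch.device('npu:0')
+
+# Rebuild the architecture used by main.py and load the best checkpoint
+checkpoint = torch.load('models/model_best.pth.tar', map_location='cpu')
+model = models.resnet18()
+model.fc = torch.nn.Linear(model.fc.in_features, checkpoint['num_classes'])
+model.load_state_dict(checkpoint['state_dict'])
+model = model.to(device).eval()
+
+# Same preprocessing as the validation pipeline in main.py
+preprocess = transforms.Compose([
+    transforms.Resize(224),
+    transforms.CenterCrop(224),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+])
+
+# test.jpg is a placeholder; replace it with your own image
+img = preprocess(Image.open('test.jpg').convert('RGB')).unsqueeze(0).to(device)
+with torch.no_grad():
+    probs = torch.nn.functional.softmax(model(img), dim=-1)
+
+classes = [line.strip() for line in open('models/labels.txt')]
+print(classes[probs.argmax().item()], probs.max().item())
+```
+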
+#### Offline Inference
+
+1. Log in to the developer board as the HwHiAiUser user and switch to the sample directory.
+2. Export the ONNX model (an optional verification sketch follows this list).
+   ```
+   python3 export.py
+   ```
+
+3. Obtain the test image.
+
+   ```
+   cd omInfer/data
+   wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/wanzutao/classfication/8.jpg
+   ```
+
+   **Note:** to use a different test image, place it in the omInfer/data directory and update the image file name in the source code.
+
+4. Take the ResNet-18 model exported from PyTorch (\*.onnx) and convert it into a model the Ascend AI processor can execute (\*.om).
+   - When the device has **less than 8 GB** of memory, set the following two environment variables to reduce the number of processes used during atc model conversion and lower its memory footprint.
+     ```
+     export TE_PARALLEL_COMPILER=1
+     export MAX_COMPILE_CORE_NUMBER=1
+     ```
+   - For convenience, the download and conversion commands are given here and can be copied and executed directly.
+     ```
+     # Copy the exported resnet18.onnx model into the model directory
+     cd ../model
+     cp ../../resnet18.onnx ./
+
+     # Fetch the AIPP configuration file
+     wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/wanzutao/classfication/aipp.cfg
+
+     # Convert the model
+     atc --model=resnet18.onnx --framework=5 --insert_op_conf=aipp.cfg --output=resnet18 --soc_version=Ascend310B4
+     ```
+
+   The atc parameters are explained below; for detailed constraints see the [ATC Model Conversion Guide](https://hiascend.com/document/redirect/CannCommunityAtc).
+
+   - --model: path of the model file before conversion.
+   - --framework: original framework type. 5 means ONNX.
+   - --insert_op_conf: path of the AIPP configuration file inserted for image preprocessing.
+   - --output: path of the converted model file. Note down where the om file is saved; it is needed later when developing the application.
+   - --soc_version: version of the Ascend AI processor.
+
+5. Build the sample source code.
+
+   Run the following commands to build the sample.
+
+   ```
+   cd ../scripts
+   bash sample_build.sh
+   ```
+
+6. Run the sample.
+
+   Run the following script:
+
+   ```
+   bash sample_run.sh
+   ```
+
+   On success, key output similar to the following is printed on the screen:
+
+   ```
+   value[0.893066] output[female]
+   ```
+
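+As referenced in step 2, the exported ONNX file can be verified before running atc. The sketch below is optional and only uses the onnx package already listed in requirements.txt; resnet18.onnx is the default file name written by export.py.
+
+```
+import onnx
+
+# Load the exported model and run ONNX's structural checks
+model = onnx.load('resnet18.onnx')
+onnx.checker.check_model(model)
+
+# Print the tensor names and shapes that the atc conversion will see
+for tensor in list(model.graph.input) + list(model.graph.output):
+    dims = [d.dim_value for d in tensor.type.tensor_type.shape.dim]
+    print(tensor.name, dims)
+```
+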
+#### Related Operations
\ No newline at end of file
diff --git a/Samples/ClassficationRetrainingAndInfer/dataset/.keep b/Samples/ClassficationRetrainingAndInfer/dataset/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Samples/ClassficationRetrainingAndInfer/export.py b/Samples/ClassficationRetrainingAndInfer/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff01023e53a36ec27da15a5fe537d433c04a6855
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/export.py
@@ -0,0 +1,38 @@
+import torch
+import torchvision.models as models
+
+model_dir = "models/model_best.pth.tar"
+output = "resnet18.onnx"
+device = torch.device("cpu")
+num_classes = 2
+print('=> running on device ' + str(device))
+print('=> loading checkpoint: ' + model_dir)
+checkpoint = torch.load(model_dir, map_location=torch.device('cpu'))
+
+# create the model architecture (the weights come from the checkpoint below,
+# so there is no need to download the ImageNet-pretrained ones)
+print('=> using model: resnet18')
+model = models.resnet18()
+
+# reshape the model's output
+model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
+
+# load the model weights
+model.load_state_dict(checkpoint['state_dict'])
+
+print('=> adding nn.Softmax layer to model')
+model = torch.nn.Sequential(model, torch.nn.Softmax(1))
+
+model.to(device)
+model.eval()
+
+# create example image data ('dummy_input' avoids shadowing the built-in input)
+resolution = checkpoint['resolution']
+dummy_input = torch.ones((1, 3, resolution, resolution))
+print('=> input size: {:d}x{:d}'.format(resolution, resolution))
+
+# export the model
+input_names = [ "input_0" ]
+output_names = [ "output_0" ]
+
+print('=> exporting model to ONNX...')
+torch.onnx.export(model, dummy_input, output, verbose=True, input_names=input_names, output_names=output_names)
+print('=> model exported to: {:s}'.format(output))
\ No newline at end of file
diff --git a/Samples/ClassficationRetrainingAndInfer/main.py b/Samples/ClassficationRetrainingAndInfer/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..eeabed4eb9cad16eb55573082fbfc79641894023
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/main.py
@@ -0,0 +1,273 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import os
+import time
+import torch_npu
+import torchvision.datasets as datasets
+import torchvision.models as models
+from torch_npu.npu import amp
+from torch.utils.tensorboard import SummaryWriter
+import datetime
+import torchvision.transforms as transforms
+import shutil
+
+model_path = "models"
+device = torch.device('npu:0')
+tensorboard = SummaryWriter(log_dir=os.path.join(model_path, "tensorboard", f"{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}"))
+best_accuracy = 0
+
+
+class AverageMeter(object):
+    """
+    Computes and stores the average and current value
+    """
+    def __init__(self, name, fmt=':f'):
+        self.name = name
+        self.fmt = fmt
+        self.reset()
+
+    def reset(self):
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        self.avg = self.sum / self.count
+
+    def __str__(self):
+        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
+        return fmtstr.format(**self.__dict__)
+
+
+class ProgressMeter(object):
+    """
+    Progress metering
+    """
+    def __init__(self, num_batches, meters, prefix=""):
+        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
+        self.meters = meters
+        self.prefix = prefix
+
+    def display(self, batch):
+        entries = [self.prefix + self.batch_fmtstr.format(batch)]
+        entries += [str(meter) for meter in self.meters]
+        print(' '.join(entries))
+
+    def _get_batch_fmtstr(self, num_batches):
+        num_digits = len(str(num_batches))
+        fmt = '{:' + str(num_digits) + 'd}'
+        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
+
+def accuracy(output, target):
+    """
+    Computes the accuracy of predictions vs groundtruth
+    """
+    with torch.no_grad():
+
+        output = F.softmax(output, dim=-1)
+        _, preds = torch.max(output, dim=-1)
+        preds = (preds == target)
+
+        return preds.float().mean().cpu().item() * 100.0
+
+def train(train_loader, model, criterion, optimizer, scaler, epoch):
+    """
+    Train one epoch over the dataset
+    """
+    batch_time = AverageMeter('Time', ':6.3f')
+    data_time = AverageMeter('Data', ':6.3f')
+    losses = AverageMeter('Loss', ':.4e')
+    acc = AverageMeter('Accuracy', ':7.3f')
+
+    progress = ProgressMeter(
+        len(train_loader),
+        [batch_time, data_time, losses, acc],
+        prefix=f"Epoch: [{epoch}]")
+
+    # switch to train mode
+    model.train()
+
+    # get the start time
+    epoch_start = time.time()
+    end = epoch_start
+
+    # train over each image batch from the dataset
+    for i, (images, target) in enumerate(train_loader):
+
+        # measure data loading time
+        data_time.update(time.time() - end)
+
+        images = images.to(device, non_blocking=True)
+        target = target.to(device, non_blocking=True)
+
+        # compute output
+        with amp.autocast():
+            output = model(images)
+            loss = criterion(output, target)
+
+        # record loss and measure accuracy
+        losses.update(loss.item(), images.size(0))
+        acc.update(accuracy(output, target), images.size(0))
+
+        # compute gradient and do SGD step
+        optimizer.zero_grad()
+        scaler.scale(loss).backward()
+        scaler.step(optimizer)
+        scaler.update()
+
+        # measure elapsed time
+        batch_time.update(time.time() - end)
+        end = time.time()
+
+        if i % 50 == 0 or i == len(train_loader)-1:
+            progress.display(i)
+
+    print(f"Epoch: [{epoch}] completed, elapsed time {time.time() - epoch_start:6.3f} seconds")
+
+    tensorboard.add_scalar('Loss/train', losses.avg, epoch)
+    tensorboard.add_scalar('Accuracy/train', acc.avg, epoch)
+    return losses.avg, acc.avg
+
+def validate(val_loader, model, criterion, epoch):
+    """
+    Measure model performance across the val dataset
+    """
+    batch_time = AverageMeter('Time', ':6.3f')
+    losses = AverageMeter('Loss', ':.4e')
+    acc = AverageMeter('Accuracy', ':7.3f')
+
+    progress = ProgressMeter(
+        len(val_loader),
+        [batch_time, losses, acc],
+        prefix='Val: ')
+
+    # switch to evaluate mode
+    model.eval()
+
+    
with torch.no_grad(): + end = time.time() + for i, (images, target) in enumerate(val_loader): + images = images.to(device,non_blocking=True) + target = target.to(device,non_blocking=True) + # compute output + with amp.autocast(): + output = model(images) + loss = criterion(output, target) + # record loss and measure accuracy + losses.update(loss.item(), images.size(0)) + acc.update(accuracy(output, target), images.size(0)) + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + if i % 10 == 0 or i == len(val_loader)-1: + progress.display(i) + + tensorboard.add_scalar('Loss/val', losses.avg, epoch) + tensorboard.add_scalar('Accuracy/val', acc.avg, epoch) + + return losses.avg, acc.avg + +def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', best_filename='model_best.pth.tar', labels_filename='labels.txt'): + """ + Save a model checkpoint file, along with the best-performing model if applicable + """ + model_dir = os.path.expanduser(model_path) + + if not os.path.exists(model_dir): + os.mkdir(model_dir) + + filename = os.path.join(model_dir, filename) + best_filename = os.path.join(model_dir, best_filename) + labels_filename = os.path.join(model_dir, labels_filename) + + # save the checkpoint + torch.save(state, filename) + + # earmark the best checkpoint + if is_best: + shutil.copyfile(filename, best_filename) + print(f"saved best model to: {best_filename}") + else: + print(f"saved checkpoint to: {filename}") + + # save labels.txt on the first epoch + if state['epoch'] == 0: + with open(labels_filename, 'w') as file: + for label in state['classes']: + file.write(f"{label}\n") + print(f"saved class labels to: {labels_filename}") + + +def main(): + global best_accuracy + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + train_transforms = transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ]) + + val_transforms = transforms.Compose([ + transforms.Resize(224), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ]) + train_dataset = datasets.ImageFolder("./dataset/train", train_transforms) + val_dataset = datasets.ImageFolder("./dataset/val", val_transforms) + train_loader = torch.utils.data.DataLoader( + train_dataset, batch_size=8, shuffle=True, + num_workers=3, pin_memory=True) + + val_loader = torch.utils.data.DataLoader( + val_dataset, batch_size=16, shuffle=False, + num_workers=3, pin_memory=True) + model = models.resnet18(pretrained=True) + num_classes = len(train_dataset.classes) + model.fc = torch.nn.Linear(model.fc.in_features, num_classes) + model = model.to(device) + criterion = nn.CrossEntropyLoss() + lr = 0.1 + momentum = 0.9 + weight_decay = 1e-4 + optimizer = torch.optim.SGD(model.parameters(), lr, + momentum=momentum, + weight_decay=weight_decay) + scaler = amp.GradScaler() + epochs = 10 + for epoch in range(epochs): + + train_loss, train_acc = train(train_loader, model, criterion, optimizer,scaler, epoch) + val_loss, val_acc = validate(val_loader, model, criterion, epoch) + + # remember best acc@1 and save checkpoint + is_best = val_acc > best_accuracy + best_accuracy = max(val_acc, best_accuracy) + + print(f"=> Epoch {epoch}") + print(f" * Train Loss {train_loss:.4e}") + print(f" * Train Accuracy {train_acc:.4f}") + print(f" * Val Loss {val_loss:.4e}") + print(f" * Val Accuracy {val_acc:.4f}{'*' if is_best else ''}") + + save_checkpoint({ + 'epoch': epoch, + 'arch': "resnet18", + 'resolution': 
224,
+        'classes': train_dataset.classes,
+        'num_classes': len(train_dataset.classes),
+        'multi_label': False,
+        'state_dict': model.state_dict(),
+        'accuracy': {'train': train_acc, 'val': val_acc},
+        'loss': {'train': train_loss, 'val': val_loss},
+        'optimizer': optimizer.state_dict(),
+        }, is_best)
+
+if __name__ == '__main__':
+    main()
diff --git a/Samples/ClassficationRetrainingAndInfer/models/.keep b/Samples/ClassficationRetrainingAndInfer/models/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Samples/ClassficationRetrainingAndInfer/omInfer/data/.keep b/Samples/ClassficationRetrainingAndInfer/omInfer/data/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Samples/ClassficationRetrainingAndInfer/omInfer/model/.keep b/Samples/ClassficationRetrainingAndInfer/omInfer/model/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Samples/ClassficationRetrainingAndInfer/omInfer/scripts/sample_build.sh b/Samples/ClassficationRetrainingAndInfer/omInfer/scripts/sample_build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d5837a40c294238f7f5d19c720efd49695b7989a
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/omInfer/scripts/sample_build.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )"
+
+function build()
+{
+    if [ -d ${ScriptPath}/../out ];then
+        rm -rf ${ScriptPath}/../out
+    fi
+
+    if [ -d ${ScriptPath}/../build/intermediates/host ];then
+        rm -rf ${ScriptPath}/../build/intermediates/host
+    fi
+
+    mkdir -p ${ScriptPath}/../build/intermediates/host
+    cd ${ScriptPath}/../build/intermediates/host
+
+    cmake ../../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE
+    if [ $? -ne 0 ];then
+        echo "[ERROR] cmake failed, please check your environment!"
+        return 1
+    fi
+    make
+    if [ $? -ne 0 ];then
+        echo "[ERROR] build failed, please check your environment!"
+        return 1
+    fi
+    cd - > /dev/null
+}
+
+function main()
+{
+    echo "[INFO] Sample preparation"
+    build
+    if [ $? -ne 0 ];then
+        return 1
+    fi
+    echo "[INFO] Sample preparation is complete"
+}
+main
+
diff --git a/Samples/ClassficationRetrainingAndInfer/omInfer/scripts/sample_run.sh b/Samples/ClassficationRetrainingAndInfer/omInfer/scripts/sample_run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2fe8dade67d7e7602a1a3f8b5eecad1fe4ed2a97
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/omInfer/scripts/sample_run.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+ScriptPath="$( cd "$(dirname "$BASH_SOURCE")" ; pwd -P )"
+
+function main()
+{
+    echo "[INFO] The sample starts to run"
+    running_command="./main"
+    cd ${ScriptPath}/../out
+    ${running_command}
+    if [ $? -ne 0 ];then
+        echo "[ERROR] The program failed to run"
+    else
+        echo "[INFO] The program ran successfully"
+    fi
+}
+main
+
diff --git a/Samples/ClassficationRetrainingAndInfer/omInfer/src/CMakeLists.txt b/Samples/ClassficationRetrainingAndInfer/omInfer/src/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5dd1bd44909c6184164cf59adf854624997ab8f8
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/omInfer/src/CMakeLists.txt
@@ -0,0 +1,49 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2019. All rights reserved.
+
+cmake_minimum_required(VERSION 3.5.1)
+
+project(sampleResnet)
+
+add_compile_options(-std=c++11)
+
+add_definitions(-DENABLE_DVPP_INTERFACE)
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../../../out")
+set(CMAKE_CXX_FLAGS_DEBUG "-fPIC -O0 -g -Wall")
+set(CMAKE_CXX_FLAGS_RELEASE "-fPIC -O2 -Wall")
+
+set(INC_PATH $ENV{DDK_PATH})
+if (NOT DEFINED ENV{DDK_PATH})
+    set(INC_PATH "/usr/local/Ascend/ascend-toolkit/latest")
+    message(STATUS "set default INC_PATH: ${INC_PATH}")
+else()
+    message(STATUS "set INC_PATH: ${INC_PATH}")
+endif ()
+
+set(LIB_PATH $ENV{NPU_HOST_LIB})
+if (NOT DEFINED ENV{NPU_HOST_LIB})
+    set(LIB_PATH "/usr/local/Ascend/ascend-toolkit/latest/runtime/lib64/stub")
+    message(STATUS "set default LIB_PATH: ${LIB_PATH}")
+else()
+    message(STATUS "set LIB_PATH: ${LIB_PATH}")
+endif ()
+
+include_directories(
+    ${INC_PATH}/runtime/include/
+    ./
+)
+
+link_directories(
+    ${LIB_PATH}
+)
+
+add_executable(main
+        main.cpp)
+
+if(target STREQUAL "Simulator_Function")
+    target_link_libraries(main funcsim)
+else()
+    target_link_libraries(main ascendcl acl_dvpp stdc++ dl rt acllite_dvpp_lite acllite_om_execute acllite_common)
+endif()
+
+install(TARGETS main DESTINATION ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
+
diff --git a/Samples/ClassficationRetrainingAndInfer/omInfer/src/main.cpp b/Samples/ClassficationRetrainingAndInfer/omInfer/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2329fd88bbb6b3944e9fbfec83a61d06d5fc7d02
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/omInfer/src/main.cpp
@@ -0,0 +1,54 @@
+#include <iostream>
+#include <vector>
+#include <string>
+#include <cstdint>
+#include "acllite_dvpp_lite/ImageProc.h"
+#include "acllite_om_execute/ModelProc.h"
+
+using namespace std;
+using namespace acllite;
+
+int main()
+{
+    vector<string> labels = { "female", "male" };
+    AclLiteResource aclResource;
+    bool ret = aclResource.Init();
+    CHECK_RET(ret, LOG_PRINT("[ERROR] InitACLResource failed."); return 1);
+
+    ImageProc imageProc;
+    ModelProc modelProc;
+    ret = modelProc.Load("../model/resnet18.om");
+    CHECK_RET(ret, LOG_PRINT("[ERROR] load model resnet18.om failed."); return 1);
+    ImageData src = imageProc.Read("../data/8.jpg");
+    CHECK_RET(src.size, LOG_PRINT("[ERROR] ImRead image failed."); return 1);
+
+    ImageData dst;
+    ImageSize dsize(224, 224);
+
+    // resize the decoded image to the model input size
+    imageProc.Resize(src, dst, dsize);
+    ret = modelProc.CreateInput(static_cast<void*>(dst.data.get()), dst.size);
+    CHECK_RET(ret, LOG_PRINT("[ERROR] Create model input failed."); return 1);
+    vector<InferenceOutput> inferOutputs;
+    ret = modelProc.Execute(inferOutputs);
+    CHECK_RET(ret, LOG_PRINT("[ERROR] model execute failed."); return 1);
+
+    uint32_t dataSize = inferOutputs[0].size;
+    // get result from output data set
+    float* outData = static_cast<float*>(inferOutputs[0].data.get());
+    if (outData == nullptr) {
+        LOG_PRINT("get result from output data set failed.");
+        return 1;
+    }
+    // find the class with the highest softmax score
+    int index = 0;
+    float max = 0;
+    for (uint32_t j = 0; j < dataSize / sizeof(float); ++j) {
+        if (outData[j] > max) {
+            max = outData[j];
+            index = j;
+        }
+    }
+    LOG_PRINT("[INFO] value[%lf] output[%s]", outData[index], labels[index].c_str());
+    outData = nullptr;
+    return 0;
+}
+
diff --git a/Samples/ClassficationRetrainingAndInfer/requirements.txt b/Samples/ClassficationRetrainingAndInfer/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b605b863e31e8eab8e961e9b02063a365e9285dd
--- /dev/null
+++ b/Samples/ClassficationRetrainingAndInfer/requirements.txt
@@ -0,0 +1,5 @@
+onnx
+numpy
+opencv-python
+protobuf==3.20.2
+tensorboard