diff --git a/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_postprocess.py b/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_postprocess.py
index f48b899d97681ad693e0a69bb1455c50aff03d42..a157d981a1e2724d05866b3450b2937c0822b57b 100644
--- a/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_postprocess.py
+++ b/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_postprocess.py
@@ -78,12 +78,9 @@ if __name__ == '__main__':
     parser.add_argument('--instance_file',type=str,default='./mmocr/data/icdar2015/instances_test.json')
     parser.add_argument('--output_file',type=str,default='./boundary_results.txt')
     args = parser.parse_args()
-    #print(args.input_path)
-    #print(args.output_file)
-    #prediction_file_path = './result/'
     prediction_file_path = args.input_path
     for root, dirs, files in os.walk(prediction_file_path):
-        prediction_file_path = os.path.join(prediction_file_path,dirs[0])
+        prediction_file_path = os.path.join(prediction_file_path,dirs[-1])
         break
     container = []
     count = 0
@@ -96,7 +93,6 @@ if __name__ == '__main__':
             container[i][j].append([])
 
     img_idx = []
-    #file_name = './mmocr/data/icdar2015/instances_test.json'
     file_name = args.instance_file
 
     img_name = []
@@ -113,14 +109,16 @@ if __name__ == '__main__':
             img_idx.append(int(img_num))
 
     for tfile_name in tqdm(os.listdir(prediction_file_path)):
+        if tfile_name.split('.')[1] != 'txt':
+            continue
         tmp = tfile_name.split('.')[0]
         index = tmp.rfind('_')
         img_name = tmp[:index]
-        index1 = img_name.rfind('_')
+        img_name_copy = img_name
+        index1 = img_name_copy.rfind('_')
         img_name = tmp[:index1]
-
-        index2 = img_name.rfind('_')+1
-        flag = int(img_name[index2:])
+        index2 = img_name_copy.rfind('_')+1
+        flag = int(img_name_copy[index2:])
 
         lines = ''
         with open(os.path.join(prediction_file_path,tfile_name), 'r') as f:
diff --git a/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_preprocess.py b/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_preprocess.py
index 63e4908a5a0fef8f37b975f1d964af2f20bc02af..4e9011512e745a21bb3428877d51c85a499f58f0 100644
--- a/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_preprocess.py
+++ b/ACL_PyTorch/contrib/cv/detection/FCENet/fcenet_preprocess.py
@@ -18,7 +18,7 @@ import os
 import warnings
 from argparse import ArgumentParser, Namespace
 from pathlib import Path
-
+from tqdm.contrib import tzip
 import mmcv
 import numpy as np
 import torch
@@ -218,13 +218,11 @@ class MMOCR:
                 arr_chunks = [
                     arrays[i:i + n] for i in range(0, len(arrays), n)
                 ]
-                #for chunk in arr_chunks:
-                #print("arr_chunks:",arr_chunks)
-                #print("filenames:",filenames)
+
                 for (chunk,filename) in zip(arr_chunks,filenames) :
                     model_inference(model, chunk, filename, batch_mode=True)
         else:
-            for (arr,filename) in zip(arrays,filenames) :
+            for (arr,filename) in tzip(arrays,filenames) :
                 model_inference(model, arr, filename, batch_mode=False)
 
     # Arguments pre-processing function
diff --git a/ACL_PyTorch/contrib/cv/detection/FCENet/readme.md b/ACL_PyTorch/contrib/cv/detection/FCENet/readme.md
index 217d9ed57e6f96dd67918363e7e4be4053413288..91a2e099151c0034b41a8605063a95cb3d2cc0a5 100644
--- a/ACL_PyTorch/contrib/cv/detection/FCENet/readme.md
+++ b/ACL_PyTorch/contrib/cv/detection/FCENet/readme.md
@@ -50,7 +50,8 @@ FCENet uses the Fourier transform to obtain text bounding boxes; on curved
 | Firmware & drivers | 1.0.17 | [PyTorch framework inference environment setup](https://www.hiascend.com/document/detail/zh/ModelZoo/pytorchframework/pies) |
 | CANN | 6.0.RC1 | - |
 | Python | 3.7.5 | - |
-
+ | torch | 1.8.0 | - |
+
 
 Note: choose the firmware and driver versions that match the inference card model and the CANN version.
 
@@ -115,7 +116,6 @@ FCENet uses the Fourier transform to obtain text bounding boxes; on curved
 
   Parameter description:
  + --dynamic-export: whether to export the ONNX model dynamically
  + --output-file: name of the exported ONNX file.
- + --shape: input data shape of the exported ONNX model
 
 2. Convert the ONNX model to an OM model
@@ -174,8 +174,7 @@ FCENet uses the Fourier transform to obtain text bounding boxes; on curved
    python3 -m ais_bench \
        --model ./fcenet_bs${batch_size} \
        --input ./preprocessed_imgs/ \
-       --output ./ \
-       --output_dirname ./result/ \
+       --output ./result \
        --outfmt TXT \
        --batchsize ${batch_size}
    ```
@@ -183,7 +182,6 @@ FCENet uses the Fourier transform to obtain text bounding boxes; on curved
  + --model path to the OM model
  + --input directory containing the preprocessed data
  + --output parent directory in which the inference results are stored
- + --output_dirname name of the subdirectory, under --output, for the inference results
  + --outfmt format in which the inference result files are saved
  + --batchsize number of bin files fed to the model per inference; 1 in this example.
 
@@ -203,8 +201,8 @@ FCENet uses the Fourier transform to obtain text bounding boxes; on curved
    Run the post-processing script to compute the accuracy of the OM model from the inference results:
    ```bash
    python3 fcenet_postprocess.py \
-       --input_path=./result \
-       --instance_file=./mmocr/data/icdar2015/instances_test.json \
+       --input_path=./result \
+       --instance_file=./mmocr/data/icdar2015/instances_test.json \
        --output_file=./boundary_results.txt
    python3 eval.py \
        ./mmocr/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py \
diff --git a/ACL_PyTorch/contrib/cv/detection/FCENet/requirements.txt b/ACL_PyTorch/contrib/cv/detection/FCENet/requirements.txt
index 0d37589fcdfb8f94cce0d763a1c73444103c0bc1..03f2a9af53ad0429a59748576f35c76d3bf76813 100644
--- a/ACL_PyTorch/contrib/cv/detection/FCENet/requirements.txt
+++ b/ACL_PyTorch/contrib/cv/detection/FCENet/requirements.txt
@@ -1,13 +1,15 @@
 torchvision == 0.12.0
 torchaudio == 0.11.0
-
+torch==1.13.0
 onnx==1.11.0
 onnxruntime ==1.11.0
 
-opencv-python
+opencv-contrib-python==3.4.11.45
+opencv-python==4.6.0.66
+opencv-python-headless==4.2.0.34
 cython
-mmcv-full==1.4.7
+mmcv-full==1.3.17
 mmdet==2.22.0
-mmocr==0.4.1
+mmocr==0.6.2
 scipy
 sympy
\ No newline at end of file
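
For reference, a minimal standalone sketch of the filename handling that the post-processing change above relies on: it skips non-`.txt` entries (such as the summary file ais_bench writes next to the per-image results) and recovers the image prefix and the trailing integer flag from a result file name. The example name `img_10_2_0.txt` is hypothetical and only illustrates the assumed `<name>_<flag>_<output-id>.txt` pattern produced by preprocessing plus ais_bench.

```python
# Illustrative sketch only: mirrors the filename parsing used in
# fcenet_postprocess.py after the patch. The file name below is a
# hypothetical example, not taken from the dataset.
tfile_name = 'img_10_2_0.txt'

if tfile_name.split('.')[1] != 'txt':      # non-.txt entries (e.g. a summary json) are skipped
    raise SystemExit('not a result file')

tmp = tfile_name.split('.')[0]             # 'img_10_2_0'
index = tmp.rfind('_')
img_name = tmp[:index]                     # 'img_10_2'  -> drop the trailing output id
img_name_copy = img_name                   # keep a copy before img_name is overwritten
index1 = img_name_copy.rfind('_')
img_name = tmp[:index1]                    # 'img_10'    -> image identifier
index2 = img_name_copy.rfind('_') + 1
flag = int(img_name_copy[index2:])         # 2           -> trailing integer parsed as the flag

print(img_name, flag)                      # img_10 2
```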