From 9ac9f05eed4ea3da3bd9ea32d933301b58d64ee8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91cyc=E5=AE=87?= <1251519599@qq.com> Date: Wed, 4 Jan 2023 08:30:09 +0000 Subject: [PATCH 1/5] update ACL_TensorFlow/contrib/nlp/Roformer_for_ACL/scripts/generate_random_data.py. https://gitee.com/ascend/modelzoo/issues/I66W8J MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 黑cyc宇 <1251519599@qq.com> --- .../nlp/Roformer_for_ACL/scripts/generate_random_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ACL_TensorFlow/contrib/nlp/Roformer_for_ACL/scripts/generate_random_data.py b/ACL_TensorFlow/contrib/nlp/Roformer_for_ACL/scripts/generate_random_data.py index 5b1da6d62..8b5cd5acf 100644 --- a/ACL_TensorFlow/contrib/nlp/Roformer_for_ACL/scripts/generate_random_data.py +++ b/ACL_TensorFlow/contrib/nlp/Roformer_for_ACL/scripts/generate_random_data.py @@ -26,8 +26,8 @@ def generate_random_data(file_path,image_num=32,batchsize=1): if not os.path.exists(sub_dir2): os.makedirs(sub_dir2) for i in range(image_num): - input_data1 = np.random.randn(batchsize,1024).astype(np.float32) - input_data2 = np.random.randn(batchsize,1024).astype(np.float32) + input_data1 = np.random.rand(batchsize,1024).astype(np.float32) + input_data2 = np.random.rand(batchsize,1024).astype(np.float32) input_data1.tofile(os.path.join(sub_dir1,str(i.__str__().zfill(6))+".bin")) input_data2.tofile(os.path.join(sub_dir2,str(i.__str__().zfill(6))+".bin")) print("num:%d random datas has been created under path:%s" %(image_num,file_path)) -- Gitee From 96205ae3430c3354843bed7ab0b5b4d0d8f20ca3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91cyc=E5=AE=87?= <1251519599@qq.com> Date: Wed, 4 Jan 2023 09:39:39 +0000 Subject: [PATCH 2/5] update ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 黑cyc宇 <1251519599@qq.com> --- ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README.md b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README.md index bff0e02ee..e8a49ebe7 100644 --- a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README.md +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README.md @@ -37,6 +37,8 @@ cd .. cd scripts python coco_convert.py --input ./coco/annotations/instances_val2017.json --output val2017.pkl python coco_annotation.py --coco_path ./coco +python img2bin.py --img-dir ./coco/images --bin-dir ./coco/input_bins +mv coco .. ``` 生成coco2017测试数据集目录 *data/dataset/*. -- Gitee From 359ad9e52d3078a06c66e65b467051405a890750 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=91cyc=E5=AE=87?= <1251519599@qq.com> Date: Wed, 4 Jan 2023 09:50:23 +0000 Subject: [PATCH 3/5] update ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README_EN.md. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 黑cyc宇 <1251519599@qq.com> --- ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README_EN.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README_EN.md b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README_EN.md index 794499120..fe516e5d8 100644 --- a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README_EN.md +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/README_EN.md @@ -37,6 +37,8 @@ cd .. 
cd scripts
python coco_convert.py --input ./coco/annotations/instances_val2017.json --output val2017.pkl
python coco_annotation.py --coco_path ./coco
+python img2bin.py --img-dir ./coco/images --bin-dir ./coco/input_bins
+mv coco ..
```

There will generate coco2017 test data set under *data/dataset/*.
-- Gitee

From 15cbb59a68eb2331b7669417751b4b348b522842 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=91cyc=E5=AE=87?= <1251519599@qq.com>
Date: Wed, 4 Jan 2023 09:51:29 +0000
Subject: [PATCH 4/5] upload ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/offline_inference/benchmark_tf.sh
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 黑cyc宇 <1251519599@qq.com>
---
 .../offline_inference/benchmark_tf.sh | 29 +++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/offline_inference/benchmark_tf.sh

diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/offline_inference/benchmark_tf.sh b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/offline_inference/benchmark_tf.sh
new file mode 100644
index 000000000..02d60f420
--- /dev/null
+++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/offline_inference/benchmark_tf.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+batch_size=$1
+
+if [ X"$batch_size" = X ]; then
+    batch_size=1
+fi
+
+run_script_file=$(readlink -f "$0")
+run_script_dir=$(dirname "$run_script_file")
+root_dir=$(dirname "$run_script_dir")
+
+model_name=Yolov5
+# Modify the following paths to match your environment.
+data_dir=$root_dir/input_bins  # images must first be converted to .bin files (see scripts/img2bin.py)
+om_file=$root_dir/yolov5_tf2_gpu.om
+output_dir=$root_dir/offline_inference/output_bins
+
+rm -rf $output_dir
+mkdir -p $output_dir
+
+$root_dir/Benchmark/out/benchmark \
+--om $om_file \
+--modelType $model_name \
+--dataDir $data_dir \
+--outDir $output_dir \
+--batchSize $batch_size \
+--imgType bin \
+--useDvpp 0
\ No newline at end of file
-- Gitee

From 16eb192c6a644118639a127a41c7627cd8ed8a63 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=91cyc=E5=AE=87?= <1251519599@qq.com>
Date: Wed, 4 Jan 2023 09:52:28 +0000
Subject: [PATCH 5/5] upload ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/offline_inference/scripts
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 黑cyc宇 <1251519599@qq.com>
---
 .../Yolov5_for_ACL/scripts/coco_annotation.py |  75 ++++++++++++
 .../cv/Yolov5_for_ACL/scripts/coco_convert.py | 112 ++++++++++++++++++
 .../scripts/get_coco_dataset_2017.sh          |  39 ++++++
 .../cv/Yolov5_for_ACL/scripts/google_utils.py |  39 ++++++
 .../cv/Yolov5_for_ACL/scripts/img2bin.py      |  33 ++++++
 .../cv/Yolov5_for_ACL/scripts/voc/README.md   |  27 +++++
 .../Yolov5_for_ACL/scripts/voc/get_voc2012.sh |  19 +++
 .../Yolov5_for_ACL/scripts/voc/voc_convert.py |  78 ++++++++++++
 .../scripts/voc/voc_make_names.py             |  46 +++++++
 .../Yolov5_for_ACL/scripts/voc_annotation.py  |  52 ++++++++
 10 files changed, 520 insertions(+)
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_annotation.py
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_convert.py
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/get_coco_dataset_2017.sh
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/google_utils.py
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/img2bin.py
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/README.md
 create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/get_voc2012.sh
 create mode 100644 
ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_convert.py create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_make_names.py create mode 100644 ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc_annotation.py diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_annotation.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_annotation.py new file mode 100644 index 000000000..ccea7036b --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_annotation.py @@ -0,0 +1,75 @@ +from absl import app, flags, logging +import os +import pickle +from os import listdir +from os.path import isfile, join +from absl.flags import FLAGS +import cv2 + +flags.DEFINE_string('coco_data', './val2017.pkl', 'path to coco data') +flags.DEFINE_string('classes', '../data/classes/coco.names', 'path to classes file') +flags.DEFINE_string('coco_path', "/Volumes/Elements/data/coco_dataset/coco", 'resize images to') +flags.DEFINE_string('image_path', "images/val2017", 'path to image val') +flags.DEFINE_string('anno_path_val', '../data/dataset/val2017.txt', 'path to classes file') + +def convert_annotation(output, data, data_type = "val"): + class_names = [c.strip() for c in open(FLAGS.classes).readlines()] + replace_dict = {"couch": "sofa", "airplane": "aeroplane", "tv": "tvmonitor", "motorcycle": "motorbike"} + + if os.path.exists(output): os.remove(output) + directory_path = os.path.join(FLAGS.coco_path, FLAGS.image_path) + # if data_type == "train": + # anno_path = directory_path + "/labels/train2014" + # image_path = os.path.join(directory_path, "trainvalno5k.txt") + # else: + # anno_path = directory_path + "/labels/val2014" + # image_path = os.path.join(directory_path, "5k.txt") + # with open(image_path) as f: + # image_paths = f.readlines() + # image_paths = [x.strip() for x in image_paths] + + image_paths = [f for f in listdir(directory_path) if isfile(join(directory_path, f))] + + check_classes = [] + count = 0 + with open(output, 'a') as f: + for image_path in image_paths: + image_inds = image_path.split(".")[0] + annotation = os.path.join(directory_path, image_path) + # if os.path.exists(os.path.join(anno_path, image_inds + ".txt")): + if image_inds in data: + objects = data[image_inds]["objects"] + for key, value in objects.items(): + if key == 'num_obj': continue + if value["name"] not in class_names: + class_ind = replace_dict[value["name"]] + class_ind = class_names.index(class_ind) + # if value["name"] not in check_classes: + # check_classes.append(value["name"]) + # print(value["name"]) + # continue + else: + class_ind = class_names.index(value["name"]) + xmin = int(value["bndbox"]["xmin"]) + xmax = int(value["bndbox"]["xmax"]) + ymin = int(value["bndbox"]["ymin"]) + ymax = int(value["bndbox"]["ymax"]) + annotation += ' ' + ','.join([str(xmin), str(ymin), str(xmax), str(ymax), str(class_ind)]) + else: continue + f.write(annotation + "\n") + count += 1 + # print(annotation) + print(count) + return + +def main(_argv): + with open(FLAGS.coco_data, "rb") as input_file: + data = pickle.load(input_file) + data = data[1] + convert_annotation(FLAGS.anno_path_val, data) + +if __name__ == '__main__': + try: + app.run(main) + except SystemExit: + pass \ No newline at end of file diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_convert.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_convert.py new file mode 100644 index 000000000..00bbabb57 --- /dev/null +++ 
b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/coco_convert.py @@ -0,0 +1,112 @@ +from absl import app, flags, logging +from absl.flags import FLAGS +import cv2 +import numpy as np +import os +import json +import sys +import pickle + +flags.DEFINE_string('input', '/Volumes/Elements/data/coco_dataset/coco/annotations/instances_val2017.json', 'path to classes file') +flags.DEFINE_string('output', 'val2017.pkl', 'path to classes file') + +class COCO: + """ + Handler Class for COCO Format + """ + + @staticmethod + def parse(json_path): + + try: + json_data = json.load(open(json_path)) + + images_info = json_data["images"] + cls_info = json_data["categories"] + + data = {} + + progress_length = len(json_data["annotations"]) + progress_cnt = 0 + + for anno in json_data["annotations"]: + + image_id = anno["image_id"] + cls_id = anno["category_id"] + + filename = None + img_width = None + img_height = None + cls = None + + for info in images_info: + if info["id"] == image_id: + filename, img_width, img_height = \ + info["file_name"].split(".")[0], info["width"], info["height"] + + for category in cls_info: + if category["id"] == cls_id: + cls = category["name"] + + size = { + "width": img_width, + "height": img_height, + "depth": "3" + } + + bndbox = { + "xmin": anno["bbox"][0], + "ymin": anno["bbox"][1], + "xmax": anno["bbox"][2] + anno["bbox"][0], + "ymax": anno["bbox"][3] + anno["bbox"][1] + } + + obj_info = { + "name": cls, + "bndbox": bndbox + } + + if filename in data: + obj_idx = str(int(data[filename]["objects"]["num_obj"])) + data[filename]["objects"][str(obj_idx)] = obj_info + data[filename]["objects"]["num_obj"] = int(obj_idx) + 1 + + elif filename not in data: + + obj = { + "num_obj": "1", + "0": obj_info + } + + data[filename] = { + "size": size, + "objects": obj + } + + percent = (float(progress_cnt) / float(progress_length)) * 100 + print(str(progress_cnt) + "/" + str(progress_length) + " total: " + str(round(percent, 2))) + progress_cnt += 1 + + #print(json.dumps(data, indent=4, sort_keys = True)) + return True, data + + except Exception as e: + + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + + msg = "ERROR : {}, moreInfo : {}\t{}\t{}".format(e, exc_type, fname, exc_tb.tb_lineno) + + return False, msg + +def main(_argv): + coco = COCO() + data = coco.parse(FLAGS.input) + with open(FLAGS.output, 'wb') as handle: + pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL) + +if __name__ == '__main__': + try: + app.run(main) + except SystemExit: + pass diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/get_coco_dataset_2017.sh b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/get_coco_dataset_2017.sh new file mode 100644 index 000000000..3e7d65bf7 --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/get_coco_dataset_2017.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +### Recommended to run 'nohup ./ &' to prevent interruption from SSH session termination. + +wait_to_finish() { + for pid in "${download_pids[@]}"; do + while kill -0 "$pid"; do + sleep 30 + done + done +} + + +# Update for default OS specific package manager. +# sudo yum -y install java-1.8.0 +# sudo yum -y remove java-1.7.0-openjdk + +mkdir -p coco/images/ coco/annotations/ + +download_pids=() + +### 2017 COCO Dataset ### + +echo "Downloading COCO dataset..." 
+curl -OL "http://images.cocodataset.org/zips/val2017.zip" & +download_pids+=("$!") +curl -OL "http://images.cocodataset.org/annotations/annotations_trainval2017.zip" & +download_pids+=("$!") + +wait_to_finish download_pids + +inflate_pids=() + +unzip 'val2017.zip' -d coco/images/ & +inflate_pids+=("$!") +unzip 'annotations_trainval2017.zip' -d coco/annotations/ & # Inflates to 'coco/annotations'. +inflate_pids+=("$!") + +wait_to_finish inflate_pids \ No newline at end of file diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/google_utils.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/google_utils.py new file mode 100644 index 000000000..8fa50197b --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/google_utils.py @@ -0,0 +1,39 @@ +import os +import time + +def gdrive_download(id='1HaXkef9z6y5l4vUnCYgdmEAj61c6bfWO', name='coco.zip'): + # https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f + # Downloads a file from Google Drive, accepting presented query + # from utils.google_utils import *; gdrive_download() + t = time.time() + + print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='') + os.remove(name) if os.path.exists(name) else None # remove existing + os.remove('cookie') if os.path.exists('cookie') else None + + # Attempt file download + os.system("curl -c ./cookie -s -L \"https://drive.google.com/uc?export=download&id=%s\" > /dev/null" % id) + if os.path.exists('cookie'): # large file + s = "curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=%s\" -o %s" % ( + id, name) + else: # small file + s = "curl -s -L -o %s 'https://drive.google.com/uc?export=download&id=%s'" % (name, id) + r = os.system(s) # execute, capture return values + os.remove('cookie') if os.path.exists('cookie') else None + + # Error check + if r != 0: + os.remove(name) if os.path.exists(name) else None # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if name.endswith('.zip'): + print('unzipping... ', end='') + os.system('unzip -q %s' % name) # unzip + os.remove(name) # remove zip to free space + + print('Done (%.1fs)' % (time.time() - t)) + return r + +# gdrive_download("1cewMfusmPjYWbrnuJRuKhPMwRe_b9PaT", name='yolov4.weights') \ No newline at end of file diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/img2bin.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/img2bin.py new file mode 100644 index 000000000..b499c0bae --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/img2bin.py @@ -0,0 +1,33 @@ +import sys +import argparse +from pathlib import Path + +import cv2 +from tqdm import tqdm + + +def main(): + args = parse_args() + img_dir = Path(args.img_dir) + bin_dir = Path(args.bin_dir) + bin_dir.mkdir(parents=True, exist_ok=True) + + assert img_dir.exists(), 'img_dir is not exists!!!' 
+
+    for file in tqdm(list(img_dir.glob('*.*'))):
+        img = cv2.imread(str(file))
+        img.tofile(bin_dir / (file.name.rsplit('.')[0] + '.bin'))
+
+    print('Image to bin conversion finished.')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--img-dir", dest="img_dir", default="../coco/images")
+    parser.add_argument("--bin-dir", dest="bin_dir", default="../coco/input_bins")
+    args = parser.parse_args()
+    return args
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/README.md b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/README.md
new file mode 100644
index 000000000..123f88485
--- /dev/null
+++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/README.md
@@ -0,0 +1,27 @@
+# VOC Dataset
+
+### Download
+
+```bash
+$ bash get_voc2012.sh
+```
+
+### Make names for VOC.
+
+```bash
+$ python voc_make_names.py [--anno_dir {Annotation directory}] [--output {OUTPUT_NAME}]
+
+# example
+$ python voc_make_names.py
+
+$ python voc_make_names.py --anno_dir ../../data/voc/anno --output ../../data/classes/voc.names
+```
+
+### Convert VOC Dataset.
+
+```bash
+$ python voc_convert.py [--image_dir {Image directory}] [--anno_dir {Annotation directory}] [--train_list_txt {Path of Train list file}] [--val_list_txt {Path of Validation list file}] [--classes {Path of Classes file}] [--train_output {Path of Output file For Train}] [--val_output {Path of Output file For Val}]
+
+# example
+$ python voc_convert.py
+```
diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/get_voc2012.sh b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/get_voc2012.sh
new file mode 100644
index 000000000..91d748b1f
--- /dev/null
+++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/get_voc2012.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+
+PARENT_DIR="$( cd "$( dirname $( dirname $( dirname "${BASH_SOURCE[0]}" ) ) )" >/dev/null 2>&1 && pwd )"
+DATA_DIR="$PARENT_DIR/data"
+
+DATASET_NAME="VOCtrainval_11-May-2012"
+
+wget -c -P $DATA_DIR http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
+
+if [[ -d "$DATA_DIR/$DATASET_NAME" ]]; then
+    echo "Already '$DATA_DIR/$DATASET_NAME' path exists." 
+ exit 1 +fi + +mkdir $DATA_DIR/$DATASET_NAME + +tar xf $DATA_DIR/VOCtrainval_11-May-2012.tar -C $DATA_DIR/$DATASET_NAME + diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_convert.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_convert.py new file mode 100644 index 000000000..d9c9bbdf2 --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_convert.py @@ -0,0 +1,78 @@ +import sys +import os + +from absl import app, flags +from absl.flags import FLAGS +from lxml import etree + + +flags.DEFINE_string('image_dir', '../../data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/JPEGImages', 'path to image dir') +flags.DEFINE_string('anno_dir', '../../data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/Annotations', 'path to anno dir') +flags.DEFINE_string('train_list_txt', '../../data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/ImageSets/Main/train.txt', 'path to a set of train') +flags.DEFINE_string('val_list_txt', '../../data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/ImageSets/Main/val.txt', 'path to a set of val') +flags.DEFINE_string('classes', '../../data/classes/voc2012.names', 'path to a list of class names') +flags.DEFINE_string('train_output', '../../data/dataset/voc2012_train.txt', 'path to a file for train') +flags.DEFINE_string('val_output', '../../data/dataset/voc2012_val.txt', 'path to a file for val') + +flags.DEFINE_boolean('no_val', False, 'if uses this flag, it does not convert a list of val') + + +def convert_annotation(list_txt, output_path, image_dir, anno_dir, class_names): + IMAGE_EXT = '.jpg' + ANNO_EXT = '.xml' + + with open(list_txt, 'r') as f, open(output_path, 'w') as wf: + while True: + line = f.readline().strip() + if line is None or not line: + break + im_p = os.path.join(image_dir, line + IMAGE_EXT) + an_p = os.path.join(anno_dir, line + ANNO_EXT) + + # Get annotation. + root = etree.parse(an_p).getroot() + bboxes = root.xpath('//object/bndbox') + names = root.xpath('//object/name') + + box_annotations = [] + for b, n in zip(bboxes, names): + name = n.text + class_idx = class_names.index(name) + + xmin = b.find('xmin').text + ymin = b.find('ymin').text + xmax = b.find('xmax').text + ymax = b.find('ymax').text + box_annotations.append(','.join([str(xmin), str(ymin), str(xmax), str(ymax), str(class_idx)])) + + annotation = os.path.abspath(im_p) + ' ' + ' '.join(box_annotations) + '\n' + + wf.write(annotation) + + +def convert_voc(image_dir, anno_dir, train_list_txt, val_list_txt, classes, train_output, val_output, no_val): + IMAGE_EXT = '.jpg' + ANNO_EXT = '.xml' + + class_names = [c.strip() for c in open(FLAGS.classes).readlines()] + + # Training set. + convert_annotation(train_list_txt, train_output, image_dir, anno_dir, class_names) + + if no_val: + return + + # Validation set. 
+ convert_annotation(val_list_txt, val_output, image_dir, anno_dir, class_names) + + +def main(_argv): + convert_voc(FLAGS.image_dir, FLAGS.anno_dir, FLAGS.train_list_txt, FLAGS.val_list_txt, FLAGS.classes, FLAGS.train_output, FLAGS.val_output, FLAGS.no_val) + print("Complete convert voc data!") + + +if __name__ == "__main__": + try: + app.run(main) + except SystemExit: + pass diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_make_names.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_make_names.py new file mode 100644 index 000000000..a4c736609 --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc/voc_make_names.py @@ -0,0 +1,46 @@ +import sys +import os + +from absl import app, flags +from absl.flags import FLAGS +from lxml import etree + + +flags.DEFINE_string('anno_dir', '../../data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/Annotations', 'path to anno dir') +flags.DEFINE_string('output', '../../data/classes/voc2012.names', 'path to anno dir') + + +def make_names(anno_dir, output): + labels_dict = {} + + anno_list = os.listdir(anno_dir) + + for anno_file in anno_list: + p = os.path.join(anno_dir, anno_file) + + # Get annotation. + root = etree.parse(p).getroot() + names = root.xpath('//object/name') + + for n in names: + labels_dict[n.text] = 0 + + labels = list(labels_dict.keys()) + labels.sort() + + with open(output, 'w') as f: + for l in labels: + f.writelines(l + '\n') + + print(f"Done making a names's file ({os.path.abspath(output)})") + + +def main(_argv): + make_names(FLAGS.anno_dir, FLAGS.output) + + +if __name__ == "__main__": + try: + app.run(main) + except SystemExit: + pass diff --git a/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc_annotation.py b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc_annotation.py new file mode 100644 index 000000000..6d53718a2 --- /dev/null +++ b/ACL_TensorFlow/contrib/cv/Yolov5_for_ACL/scripts/voc_annotation.py @@ -0,0 +1,52 @@ +import os +import argparse +import xml.etree.ElementTree as ET + +def convert_voc_annotation(data_path, data_type, anno_path, use_difficult_bbox=True): + + classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', + 'train', 'tvmonitor'] + img_inds_file = os.path.join(data_path, 'ImageSets', 'Main', data_type + '.txt') + with open(img_inds_file, 'r') as f: + txt = f.readlines() + image_inds = [line.strip() for line in txt] + + with open(anno_path, 'a') as f: + for image_ind in image_inds: + image_path = os.path.join(data_path, 'JPEGImages', image_ind + '.jpg') + annotation = image_path + label_path = os.path.join(data_path, 'Annotations', image_ind + '.xml') + root = ET.parse(label_path).getroot() + objects = root.findall('object') + for obj in objects: + difficult = obj.find('difficult').text.strip() + if (not use_difficult_bbox) and(int(difficult) == 1): + continue + bbox = obj.find('bndbox') + class_ind = classes.index(obj.find('name').text.lower().strip()) + xmin = bbox.find('xmin').text.strip() + xmax = bbox.find('xmax').text.strip() + ymin = bbox.find('ymin').text.strip() + ymax = bbox.find('ymax').text.strip() + annotation += ' ' + ','.join([xmin, ymin, xmax, ymax, str(class_ind)]) + print(annotation) + f.write(annotation + "\n") + return len(image_inds) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("--data_path", default="/home/VOC/") + parser.add_argument("--train_annotation", 
default="./data/dataset/voc_train.txt") + parser.add_argument("--test_annotation", default="./data/dataset/voc_test.txt") + flags = parser.parse_args() + + if os.path.exists(flags.train_annotation):os.remove(flags.train_annotation) + if os.path.exists(flags.test_annotation):os.remove(flags.test_annotation) + + num1 = convert_voc_annotation(os.path.join(flags.data_path, 'train/VOCdevkit/VOC2007'), 'trainval', flags.train_annotation, False) + num2 = convert_voc_annotation(os.path.join(flags.data_path, 'train/VOCdevkit/VOC2012'), 'trainval', flags.train_annotation, False) + num3 = convert_voc_annotation(os.path.join(flags.data_path, 'test/VOCdevkit/VOC2007'), 'test', flags.test_annotation, False) + print('=> The number of image for train is: %d\tThe number of image for test is:%d' %(num1 + num2, num3)) -- Gitee