From eeaaa4279f51f695f5b9d158109dec22e41245f8 Mon Sep 17 00:00:00 2001 From: shenwei41 Date: Fri, 8 Aug 2025 11:57:01 +0800 Subject: [PATCH] change map param --- community/cv/pointrend/maskrcnn_pointrend/src/dataset.py | 2 -- research/cv/CFDT/src/dataset/dataset.py | 1 - research/cv/FSAF/src/dataset.py | 2 -- research/cv/Focus-DETR/models/focus_detr/dataset.py | 1 - research/cv/FreeAnchor/src/data/dataset.py | 2 -- research/cv/GridRCNN/src/dataset.py | 2 -- research/cv/PVAnet/src/dataset.py | 2 -- research/cv/SPADE/src/data/__init__.py | 2 -- research/cv/faster_rcnn_ssod/src/dataset.py | 6 ------ research/cv/unisiam/train.py | 2 +- 10 files changed, 1 insertion(+), 21 deletions(-) diff --git a/community/cv/pointrend/maskrcnn_pointrend/src/dataset.py b/community/cv/pointrend/maskrcnn_pointrend/src/dataset.py index d96e9e45c..b3d8cf6f3 100644 --- a/community/cv/pointrend/maskrcnn_pointrend/src/dataset.py +++ b/community/cv/pointrend/maskrcnn_pointrend/src/dataset.py @@ -605,7 +605,6 @@ def create_maskrcnn_dataset(mindrecord_file, batch_size=2, device_num=1, rank_id ds = ds.map(operations=compose_map_func, input_columns=["image", "annotation", "mask", "mask_shape"], output_columns=["image", "image_shape", "box", "label", "valid_num", "mask"], - column_order=["image", "image_shape", "box", "label", "valid_num", "mask"], python_multiprocessing=False, num_parallel_workers=num_parallel_workers) ds = ds.batch(batch_size, drop_remainder=True, pad_info={"mask": ([config.max_instance_count, None, None], 0)}) @@ -616,7 +615,6 @@ ds = ds.map(operations=compose_map_func, input_columns=["image_id", "image", "annotation", "mask", "mask_shape"], output_columns=["image_id", "image", "image_shape", "box", "label", "valid_num", "mask"], - column_order=["image_id", "image", "image_shape", "box", "label", "valid_num", "mask"], num_parallel_workers=num_parallel_workers) ds = ds.batch(batch_size,
drop_remainder=True) diff --git a/research/cv/CFDT/src/dataset/dataset.py b/research/cv/CFDT/src/dataset/dataset.py index 18b7ccc4f..3c54cd4ba 100644 --- a/research/cv/CFDT/src/dataset/dataset.py +++ b/research/cv/CFDT/src/dataset/dataset.py @@ -185,7 +185,6 @@ def build_dataset(cfg): partial(pad_image_to_max_size, max_size=cfg.max_img_size * 1328 // 800), input_columns=['image'], output_columns=['image', 'mask'], - column_order=['image', 'mask', 'bboxes', 'labels', 'orig_sizes', 'n_boxes', 'img_id'], num_parallel_workers=cfg.num_workers ) if cfg.eval: diff --git a/research/cv/FSAF/src/dataset.py b/research/cv/FSAF/src/dataset.py index 63b7d52fa..2c3dc9c6b 100644 --- a/research/cv/FSAF/src/dataset.py +++ b/research/cv/FSAF/src/dataset.py @@ -355,7 +355,6 @@ def create_mindrecord_dataset( ds = ds.map( input_columns=["image", "annotation", "img_id"], output_columns=["image", "image_shape", "box", "label", "valid_num"], - column_order=['image', 'image_shape', 'box', 'label', 'valid_num'], operations=compose_map_func, python_multiprocessing=bool(python_multiprocessing), num_parallel_workers=config.num_parallel_workers @@ -410,7 +409,6 @@ def create_coco_det_dataset( input_columns=['image', 'bbox', 'category_id', 'iscrowd'], output_columns=['image', 'image_shape', 'box', 'label', 'valid_num'], operations=compose_map_func, - column_order=['image', 'image_shape', 'box', 'label', 'valid_num'], python_multiprocessing=python_multiprocessing, num_parallel_workers=config.num_parallel_workers ) diff --git a/research/cv/Focus-DETR/models/focus_detr/dataset.py b/research/cv/Focus-DETR/models/focus_detr/dataset.py index bc62edefd..3d8b5268c 100644 --- a/research/cv/Focus-DETR/models/focus_detr/dataset.py +++ b/research/cv/Focus-DETR/models/focus_detr/dataset.py @@ -182,7 +182,6 @@ def build_dataset(cfg): partial(pad_image_to_max_size, max_size=cfg.max_img_size), input_columns=["image"], output_columns=["image", "mask"], - column_order=["image", "mask", "bboxes", "labels", 
"orig_sizes", "n_boxes", "img_id"], num_parallel_workers=cfg.num_workers, ) dataset = dataset.batch(cfg.batch_size) diff --git a/research/cv/FreeAnchor/src/data/dataset.py b/research/cv/FreeAnchor/src/data/dataset.py index e0fb5b1b4..e5bebdcd4 100644 --- a/research/cv/FreeAnchor/src/data/dataset.py +++ b/research/cv/FreeAnchor/src/data/dataset.py @@ -353,7 +353,6 @@ def create_mindrecord_dataset( ds = ds.map( input_columns=["image", "annotation", "img_id"], output_columns=["image", "image_shape", "box", "label", "valid_num"], - column_order=['image', 'image_shape', 'box', 'label', 'valid_num'], operations=compose_map_func, python_multiprocessing=python_multiprocessing, num_parallel_workers=config.num_parallel_workers @@ -403,7 +402,6 @@ def create_coco_det_dataset( output_columns=[ 'image', 'image_shape', 'box', 'label', 'valid_num' ], - column_order=['image', 'image_shape', 'box', 'label', 'valid_num'], operations=compose_map_func, python_multiprocessing=python_multiprocessing, num_parallel_workers=config.num_parallel_workers diff --git a/research/cv/GridRCNN/src/dataset.py b/research/cv/GridRCNN/src/dataset.py index 32fee7dfe..ebd3558b4 100644 --- a/research/cv/GridRCNN/src/dataset.py +++ b/research/cv/GridRCNN/src/dataset.py @@ -353,7 +353,6 @@ def create_mindrecord_dataset( ds = ds.map( input_columns=["image", "annotation", "img_id"], output_columns=["image", "image_shape", "box", "label", "valid_num"], - column_order=['image', 'image_shape', 'box', 'label', 'valid_num'], operations=compose_map_func, python_multiprocessing=python_multiprocessing, num_parallel_workers=config.num_parallel_workers @@ -403,7 +402,6 @@ def create_coco_det_dataset( output_columns=[ 'image', 'image_shape', 'box', 'label', 'valid_num' ], - column_order=['image', 'image_shape', 'box', 'label', 'valid_num'], operations=compose_map_func, python_multiprocessing=python_multiprocessing, num_parallel_workers=config.num_parallel_workers diff --git a/research/cv/PVAnet/src/dataset.py 
b/research/cv/PVAnet/src/dataset.py index 3443d3416..0917ff2a7 100644 --- a/research/cv/PVAnet/src/dataset.py +++ b/research/cv/PVAnet/src/dataset.py @@ -553,14 +553,12 @@ def create_fasterrcnn_dataset(config, mindrecord_file, batch_size=2, device_num= if is_training: ds = ds.map(input_columns=["image", "annotation"], output_columns=["image", "image_shape", "box", "label", "valid_num"], - column_order=["image", "image_shape", "box", "label", "valid_num"], operations=compose_map_func, python_multiprocessing=python_multiprocessing, num_parallel_workers=num_parallel_workers) ds = ds.batch(batch_size, drop_remainder=True) else: ds = ds.map(input_columns=["image", "annotation"], output_columns=["image", "image_shape", "box", "label", "valid_num"], - column_order=["image", "image_shape", "box", "label", "valid_num"], operations=compose_map_func, num_parallel_workers=num_parallel_workers) ds = ds.batch(batch_size, drop_remainder=True) diff --git a/research/cv/SPADE/src/data/__init__.py b/research/cv/SPADE/src/data/__init__.py index e123a8711..c9b9f7950 100644 --- a/research/cv/SPADE/src/data/__init__.py +++ b/research/cv/SPADE/src/data/__init__.py @@ -64,7 +64,6 @@ class DatasetInit: dataset = dataset.map(operations=[self.preprocess_input], input_columns=['label', 'instance'], output_columns=['label', 'instance', 'input_semantics'], - column_order=["label", "instance", "image", 'input_semantics'], num_parallel_workers=8) return dataset @@ -77,7 +76,6 @@ class DatasetInit: dataset = dataset.map(operations=[self.preprocess_input], input_columns=['label', 'instance'], output_columns=['label', 'instance', 'input_semantics'], - column_order=["label", "instance", "image", 'input_semantics'], num_parallel_workers=8) return dataset diff --git a/research/cv/faster_rcnn_ssod/src/dataset.py b/research/cv/faster_rcnn_ssod/src/dataset.py index 0e595578d..8f0bd0630 100644 --- a/research/cv/faster_rcnn_ssod/src/dataset.py +++ b/research/cv/faster_rcnn_ssod/src/dataset.py @@ -353,10 
+353,6 @@ def create_semisup_dataset(cfg, is_training=True): "label_gt_bboxes", "label_gt_labels", "label_gt_nums", "unlabel_img_strong", "unlabel_img_weak", "unlabel_img_metas", "unlabel_gt_bboxes", "unlabel_gt_labels", "unlabel_gt_nums"], - column_order=["label_img_strong", "label_img_weak", "label_img_metas", - "label_gt_bboxes", "label_gt_labels", "label_gt_nums", - "unlabel_img_strong", "unlabel_img_weak", "unlabel_img_metas", - "unlabel_gt_bboxes", "unlabel_gt_labels", "unlabel_gt_nums"], num_parallel_workers=cfg.num_parallel_workers) data_loader = data_loader.batch(cfg.batch_size, drop_remainder=True) data_loader = data_loader.repeat(-1) @@ -371,8 +367,6 @@ def create_semisup_dataset(cfg, is_training=True): input_columns=["label_img", "label_annos"], output_columns=["label_img_weak", "label_img_metas", "label_gt_bboxes", "label_gt_labels", "label_gt_nums"], - column_order=["label_img_weak", "label_img_metas", "label_gt_bboxes", - "label_gt_labels", "label_gt_nums", "label_img_id"], num_parallel_workers=cfg.num_parallel_workers) data_loader = data_loader.batch(cfg.test_batch_size, drop_remainder=False) diff --git a/research/cv/unisiam/train.py b/research/cv/unisiam/train.py index e009e2f54..81d42fa74 100644 --- a/research/cv/unisiam/train.py +++ b/research/cv/unisiam/train.py @@ -93,7 +93,7 @@ def build_train_loader(args, device_num=None, rank_id=None): num_shards=device_num, shard_id=rank_id) train_dataset = train_dataset.map( operations=copy_column, input_columns=["image", "label"], output_columns=["image1", "image2", "label"], - column_order=["image1", "image2", "label"], num_parallel_workers=args.num_workers) + num_parallel_workers=args.num_workers) train_dataset = train_dataset.map(operations=train_transform, input_columns=["image1"], num_parallel_workers=args.num_workers, python_multiprocessing=True) train_dataset = train_dataset.map(operations=train_transform, input_columns=["image2"], -- Gitee